diff -Nru tahoe-lafs-1.9.2/.darcs-boringfile tahoe-lafs-1.10.0/.darcs-boringfile --- tahoe-lafs-1.9.2/.darcs-boringfile 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/.darcs-boringfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -# Boring file regexps: -\.hi$ -\.o$ -\.o\.cmd$ -# *.ko files aren't boring by default because they might -# be Korean translations rather than kernel modules. -# \.ko$ -\.ko\.cmd$ -\.mod\.c$ -(^|/)\.tmp_versions($|/) -(^|/)CVS($|/) -(^|/)RCS($|/) -~$ -#(^|/)\.[^/] -(^|/)_darcs($|/) -\.bak$ -\.BAK$ -\.orig$ -(^|/)vssver\.scc$ -\.swp$ -(^|/)MT($|/) -(^|/)\{arch\}($|/) -(^|/).arch-ids($|/) -(^|/), -\.class$ -\.prof$ -(^|/)\.DS_Store$ -(^|/)BitKeeper($|/) -(^|/)ChangeSet($|/) -(^|/)\.svn($|/) -(^|/)\.git($|/) -\.py[co]$ -\# -\.cvsignore$ -(^|/)Thumbs\.db$ -(^|/)autom4te\.cache($|/) - -^_trial_temp.*($|/) -^\.buildbot($|/) -^MANIFEST$ -^dist($|/) -^debian($|/) - -^build($|/) -^build-stamp$ -^python-build-stamp-2.[4567]$ -^\.coverage$ -^coverage-html($|/) -^twisted/plugins/dropin\.cache$ -^\.coverage\.el$ -^_test_memory($|/) - -# _version.py is generated at build time, and never checked in -^src/allmydata/_version\.py$ -# _appname.py is generated at build time, and never checked in -^src/allmydata/_appname\.py$ - -# bin/tahoe scripts (not including tahoe-script.template) are generated files -^bin/tahoe$ -^bin/tahoe\.pyscript$ -^bin/tahoe-script\.py$ - -# this file is maintained by the buildbot -^\.buildbot-sourcedata$ - -# automatically-built dependencies (using the 'build-deps' target) go here -^support - -# creating a tahoe egg puts files here -allmydata_tahoe.egg-info$ -^Twisted-.*\.egg/ - -# zipped .eggs are boring, contents of .egg directories are not -^.*\.egg$ - -^\.checked-deps$ -^\.built$ - -^misc/dependencies/build($|/) -^misc/dependencies/temp($|/) - -^tahoe-deps($|/) -^tahoe-deps\.tar\.gz$ diff -Nru tahoe-lafs-1.9.2/CREDITS tahoe-lafs-1.10.0/CREDITS --- tahoe-lafs-1.9.2/CREDITS 2012-05-16 23:16:23.000000000 +0000 +++ tahoe-lafs-1.10.0/CREDITS 2013-09-03 15:38:27.000000000 +0000 @@ -15,7 +15,8 @@ E: zooko@zooko.com D: main developer -N: David-Sarah Hopwood +N: Daira Hopwood (formerly David-Sarah Hopwood) +E: daira@jacaranda.org E: david-sarah@jacaranda.org P: 3D6A 08E9 1262 3E9A 00B2 1BDC 067F 4920 98CF 2762 (preferred) P: 12F8 A95C C90B B68E 369C 003D 5947 3C63 3CB3 A807 diff -Nru tahoe-lafs-1.9.2/MANIFEST.in tahoe-lafs-1.10.0/MANIFEST.in --- tahoe-lafs-1.9.2/MANIFEST.in 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/MANIFEST.in 2013-09-03 15:38:27.000000000 +0000 @@ -1,12 +1,11 @@ include COPYING.GPL COPYING.TGPPL.rst CREDITS Makefile NEWS.rst Tahoe.home -include relnotes.txt .darcs-boringfile +include relnotes.txt include bin/tahoe-script.template recursive-include src *.xhtml *.js *.png *.css recursive-include twisted *.py graft docs graft misc graft static -graft darcsver-1.7.2.egg -graft setuptools-0.6c16dev3.egg +graft setuptools-0.6c16dev4.egg global-exclude *~ *.pyc diff -Nru tahoe-lafs-1.9.2/Makefile tahoe-lafs-1.10.0/Makefile --- tahoe-lafs-1.9.2/Makefile 2012-06-23 23:22:37.000000000 +0000 +++ tahoe-lafs-1.10.0/Makefile 2013-09-03 15:38:27.000000000 +0000 @@ -75,6 +75,18 @@ quicktest: $(TAHOE) debug trial $(TRIALARGS) $(TEST) +# "make tmpfstest" may be a faster way of running tests on Linux. It works best when you have +# at least 330 MiB of free physical memory (to run the whole test suite). Since it uses sudo +# to mount/unmount the tmpfs filesystem, it might prompt for your password. 
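+# For example, to run a single test module under tmpfs, the TEST variable is
+# passed through to trial just as with the other test targets (the module
+# name below is only an illustration):
+#
+#   make tmpfstest TEST=allmydata.test.test_util
+#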
+tmpfstest:
+	time make _tmpfstest 'TMPDIR=$(shell mktemp -d --tmpdir=.)'
+
+_tmpfstest:
+	sudo mount -t tmpfs -o size=400m tmpfs '$(TMPDIR)'
+	-$(TAHOE) debug trial --rterrors '--temp-directory=$(TMPDIR)/_trial_temp' $(TRIALARGS) $(TEST)
+	sudo umount '$(TMPDIR)'
+	rmdir '$(TMPDIR)'
+
 # code-coverage: install the "coverage" package from PyPI, do "make
 # quicktest-coverage" to do a unit test run with coverage-gathering enabled,
 # then use "make coverage-output-text" for a brief report, or "make
@@ -134,15 +146,15 @@
 	@echo
 
 pyflakes:
-	$(PYTHON) -OOu `which pyflakes` $(SOURCES) |sort |uniq
+	@$(PYTHON) -OOu `which pyflakes` $(SOURCES) |sort |uniq
 	@echo
 
 check-umids:
-	$(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py'`
+	$(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py' -not -name 'old.py'`
 	@echo
 
-check-umids:
-	-$(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py'`
+	-$(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py' -not -name 'old.py'`
 	@echo
 
 doc-checks: check-rst
@@ -205,6 +217,10 @@
 bench-dirnode: .built
 	$(TAHOE) @src/allmydata/test/bench_dirnode.py
 
+# the provisioning tool runs as a stand-alone webapp server
+run-provisioning-tool: .built
+	$(TAHOE) @misc/operations_helpers/provisioning/run.py
+
 # 'make repl' is a simple-to-type command to get a Python interpreter loop
 # from which you can type 'import allmydata'
 repl:
@@ -254,9 +270,9 @@
 # support/lib/ directory is gone.
 
 fetch-and-unpack-deps:
-	test -f tahoe-deps.tar.gz || wget https://tahoe-lafs.org/source/tahoe/deps/tahoe-deps.tar.gz
+	test -f tahoe-deps.tar.gz || wget https://tahoe-lafs.org/source/tahoe-lafs/deps/tahoe-lafs-deps.tar.gz
 	rm -rf tahoe-deps
-	tar xzf tahoe-deps.tar.gz
+	tar xzf tahoe-lafs-deps.tar.gz
 
 test-desert-island:
 	$(MAKE) fetch-and-unpack-deps
@@ -272,4 +288,4 @@
 	$(PYTHON) setup.py sdist --sumo --formats=bztar,gztar,zip
 
 upload-tarballs:
-	@if [ "X${BB_BRANCH}" = "Xtrunk" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/allmydata-tahoe-*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi
+	@if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/allmydata-tahoe-*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi
diff -Nru tahoe-lafs-1.9.2/NEWS.rst tahoe-lafs-1.10.0/NEWS.rst
--- tahoe-lafs-1.9.2/NEWS.rst	2012-07-03 16:59:22.000000000 +0000
+++ tahoe-lafs-1.10.0/NEWS.rst	2013-09-03 15:38:27.000000000 +0000
@@ -1,13 +1,142 @@
-
-==================================
+==================================
 User-Visible Changes in Tahoe-LAFS
 ==================================
 
+Release 1.10.0 (2013-05-01)
+'''''''''''''''''''''''''''
+
+New Features
+------------
+
+- The Welcome page has been redesigned. This is a preview of the design style
+  that is likely to be used in other parts of the WUI in future Tahoe-LAFS
+  versions. (`#1713`_, `#1457`_, `#1735`_)
+- A new extensible Introducer protocol has been added, as the basis for
+  future improvements such as accounting. Compatibility with older nodes is
+  not affected. When server, introducer, and client are all upgraded, the
+  welcome page will show node IDs that start with "v0-" instead of the old
+  tubid. See `<docs/nodekeys.rst>`__ for details.
(`#466`_) +- The web-API has a new ``relink`` operation that supports directly moving + files between directories. (`#1579`_) + +Security Improvements +--------------------- + +- The ``introducer.furl`` for new Introducers is now unguessable. In previous + releases, this FURL used a predictable swissnum, allowing a network + eavesdropper who observes any node connecting to the Introducer to access + the Introducer themselves, and thus use servers or offer storage service to + clients (i.e. "join the grid"). In the new code, the only way to join a + grid is to be told the ``introducer.furl`` by someone who already knew it. + Note that pre-existing introducers are not changed. To force an introducer + to generate a new FURL, delete the existing ``introducer.furl`` file and + restart it. After doing this, the ``[client]introducer.furl`` setting of + every client and server that should connect to that introducer must be + updated. Note that other users of a shared machine may be able to read + ``introducer.furl`` from your ``tahoe.cfg`` file unless you configure the + file permissions to prevent them. (`#1802`_) +- Both ``introducer.furl`` and ``helper.furl`` are now censored from the + Welcome page, to prevent users of your gateway from learning enough to + create gateway nodes of their own. For existing guessable introducer + FURLs, the ``introducer`` swissnum is still displayed to show that a + guessable FURL is in use. (`#860`_) + +Command-line Syntax Changes +--------------------------- + +- Global options to ``tahoe``, such as ``-d``/``--node-directory``, must now + come before rather than after the command name (for example, + ``tahoe -d BASEDIR cp -r foo: bar:`` ). (`#166`_) + +Notable Bugfixes +---------------- + +- In earlier versions, if a connection problem caused a download failure for + an immutable file, subsequent attempts to download the same file could also + fail. This is now fixed. (`#1679`_) +- Filenames in WUI directory pages are now displayed correctly when they + contain characters that require HTML escaping. (`#1143`_) +- Non-ASCII node nicknames no longer cause WUI errors. (`#1298`_) +- Checking a LIT file using ``tahoe check`` no longer results in an + exception. (`#1758`_) +- The SFTP frontend now works with recent versions of Twisted, rather than + giving errors or warnings about use of ``IFinishableConsumer``. (`#1926`_, + `#1564`_, `#1525`_) +- ``tahoe cp --verbose`` now counts the files being processed correctly. + (`#1805`_, `#1783`_) +- Exceptions no longer trigger an unhelpful crash reporter on Ubuntu 12.04 + ("Precise") or later. (`#1746`_) +- The error message displayed when a CLI tool cannot connect to a gateway has + been improved. (`#974`_) +- Other minor fixes: `#1781`_, `#1812`_, `#1915`_, `#1484`_, `#1525`_ + +Compatibility and Dependencies +------------------------------ + +- Python >= 2.6, except Python 3 (`#1658`_) +- Twisted >= 11.0.0 (`#1771`_) +- mock >= 0.8 (for unit tests) +- pycryptopp >= 0.6.0 (for Ed25519 signatures) +- zope.interface >= 3.6.0 (except 3.6.3 or 3.6.4) + +Other Changes +------------- + +- The ``flogtool`` utility, used to read detailed event logs, can now be + accessed as ``tahoe debug flogtool`` even when Foolscap is not installed + system-wide. (`#1693`_) +- The provisioning/reliability pages were removed from the main client's web + interface, and moved into a standalone web-based tool. Use the ``run.py`` + script in ``misc/operations_helpers/provisioning/`` to access them. 
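+  For example, from a built source tree it can be started with the
+  ``run-provisioning-tool`` Makefile target added in this release::
+
+    make run-provisioning-tool
+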
+- Web clients can now cache (ETag) immutable directory pages. (`#443`_)
+- `<docs/convergence-secret.rst>`__ was added to document the administration
+  of convergence secrets. (`#1761`_)
+
+Precautions when Upgrading
+--------------------------
+
+- When upgrading a grid from a recent revision of trunk, follow the
+  precautions from this `message to the tahoe-dev mailing list`_, to ensure
+  that announcements to the Introducer are recognized after the upgrade.
+  This is not necessary when upgrading from a previous release like 1.9.2.
+
+.. _`#166`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/166
+.. _`#443`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/443
+.. _`#466`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/466
+.. _`#860`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/860
+.. _`#974`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/974
+.. _`#1143`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1143
+.. _`#1298`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1298
+.. _`#1457`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1457
+.. _`#1484`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1484
+.. _`#1525`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1525
+.. _`#1564`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1564
+.. _`#1579`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1579
+.. _`#1658`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1658
+.. _`#1679`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1679
+.. _`#1693`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1693
+.. _`#1713`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1713
+.. _`#1735`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1735
+.. _`#1746`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1746
+.. _`#1758`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1758
+.. _`#1761`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1761
+.. _`#1771`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1771
+.. _`#1781`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1781
+.. _`#1783`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1783
+.. _`#1802`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1802
+.. _`#1805`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1805
+.. _`#1812`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1812
+.. _`#1915`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1915
+.. _`#1926`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1926
+.. _`message to the tahoe-dev mailing list`:
+   https://tahoe-lafs.org/pipermail/tahoe-dev/2013-March/008096.html
+
+
 Release 1.9.2 (2012-07-03)
----------------------------
+''''''''''''''''''''''''''
 
 Notable Bugfixes
-''''''''''''''''
+----------------
 
 - Several regressions in support for reading (`#1636`_), writing/modifying
   (`#1670`_, `#1749`_), verifying (`#1628`_) and repairing (`#1655`_, `#1669`_,
@@ -25,7 +154,7 @@
   computed correctly. (`#1115`_)
 
 Configuration/Behavior Changes
-''''''''''''''''''''''''''''''
+------------------------------
 
 - The capability of the upload directory for the drop-upload frontend is now
   specified in the file ``private/drop_upload_dircap`` under
@@ -33,13 +162,13 @@
   (`#1593`_)
 
 Packaging Changes
-'''''''''''''''''
+-----------------
 
 - Tahoe-LAFS can be built correctly from a git repository as well as from
   darcs.
 
 Compatibility and Dependencies
-''''''''''''''''''''''''''''''
+------------------------------
 
 - foolscap >= 0.6.3 is required, in order to make Tahoe-LAFS compatible
   with Twisted >= 11.1.0.
(`#1788`_) @@ -65,10 +194,10 @@ Release 1.9.1 (2012-01-12) --------------------------- +'''''''''''''''''''''''''' Security-related Bugfix -''''''''''''''''''''''' +----------------------- - Fix flaw that would allow servers to cause undetected corruption when retrieving the contents of mutable files (both SDMF and MDMF). (`#1654`_) @@ -77,10 +206,10 @@ Release 1.9.0 (2011-10-30) --------------------------- +'''''''''''''''''''''''''' New Features -'''''''''''' +------------ - The most significant new feature in this release is MDMF: "Medium-size Distributed Mutable Files". Unlike standard SDMF files, these provide @@ -106,7 +235,7 @@ can be reached from the Recent Uploads and Downloads page. Configuration/Behavior Changes -'''''''''''''''''''''''''''''' +------------------------------ - Prior to Tahoe-LAFS v1.3, the configuration of some node options could be specified using individual config files rather than via ``tahoe.cfg``. @@ -122,7 +251,7 @@ listing is now labelled "unlink" rather than "del". (`#1104`_) Notable Bugfixes -'''''''''''''''' +---------------- - The security bugfix for the vulnerability allowing deletion of shares, detailed in the news for v1.8.3 below, is also included in this @@ -136,7 +265,7 @@ 5% on a fast network). (`#1268`_) Packaging Changes -''''''''''''''''' +----------------- - The files related to Debian packaging have been removed from the Tahoe source tree, since they are now maintained as part of the official @@ -160,7 +289,7 @@ * Open Software License Compatibility and Dependencies -'''''''''''''''''''''''''''''' +------------------------------ - To resolve an incompatibility between Nevow and zope.interface (versions 3.6.3 and 3.6.4), Tahoe-LAFS now requires an earlier or later @@ -174,7 +303,7 @@ dependency via the "secure_connections" option of foolscap. (`#1383`_) Minor Changes -''''''''''''' +------------- - A ``man`` page has been added (`#1420`_). All other docs are in ReST format. @@ -206,10 +335,10 @@ Release 1.8.3 (2011-09-13) --------------------------- +'''''''''''''''''''''''''' Security-related Bugfix -''''''''''''''''''''''' +----------------------- - Fix flaw that would allow a person who knows a storage index of a file to delete shares of that file. (`#1528`_) @@ -223,10 +352,10 @@ Release 1.8.2 (2011-01-30) --------------------------- +'''''''''''''''''''''''''' Compatibility and Dependencies -'''''''''''''''''''''''''''''' +------------------------------ - Tahoe is now compatible with Twisted-10.2 (released last month), as well as with earlier versions. The previous Tahoe-1.8.1 release @@ -239,7 +368,7 @@ Tahoe code. Other Changes -''''''''''''' +------------- - the default reserved_space value for new storage nodes is 1 GB (`#1208`_) @@ -264,10 +393,10 @@ Release 1.8.1 (2010-10-28) --------------------------- +'''''''''''''''''''''''''' Bugfixes and Improvements -''''''''''''''''''''''''' +------------------------- - Allow the repairer to improve the health of a file by uploading some shares, even if it cannot achieve the configured happiness @@ -298,14 +427,14 @@ script, rather than an obscure tool named 'twistd'). (`#174`_) Removed Features -'''''''''''''''' +---------------- - The tahoe start/stop/restart and node creation commands no longer accept the -m or --multiple option, for consistency between platforms. (`#1262`_) Packaging -''''''''' +--------- - We now host binary packages so that users on certain operating systems can install without having a compiler. @@ -319,7 +448,7 @@ version number. 
(`#1233`_) Documentation -''''''''''''' +------------- - All current documentation in .txt format has been converted to .rst format. (`#1225`_) @@ -345,10 +474,10 @@ Release 1.8.0 (2010-09-23) --------------------------- +'''''''''''''''''''''''''' New Features -'''''''''''' +------------ - A completely new downloader which improves performance and robustness of immutable-file downloads. It uses the fastest K @@ -367,7 +496,7 @@ Windows. (`#1074`_) Bugfixes and Improvements -''''''''''''''''''''''''' +------------------------- - Document and clean up the command-line options for specifying the node's base directory. (`#188`_, `#706`_, `#715`_, `#772`_, @@ -384,7 +513,7 @@ `#1127`_, `#1129`_, `#1131`_, `#1166`_, `#1175`_) Dependency Updates -'''''''''''''''''' +------------------ - on x86 and x86-64 platforms, pycryptopp >= 0.5.20 - pycrypto 2.2 is excluded due to a bug @@ -414,10 +543,10 @@ .. _`#1175`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1175 Release 1.7.1 (2010-07-18) --------------------------- +'''''''''''''''''''''''''' Bugfixes and Improvements -''''''''''''''''''''''''' +------------------------- - Fix bug in which uploader could fail with AssertionFailure or report that it had achieved servers-of-happiness when it hadn't. (`#1118`_) @@ -439,7 +568,7 @@ - Forward-compatibility improvements for non-ASCII caps (`#1051`_) Code improvements -''''''''''''''''' +----------------- - Simplify and tidy-up directories, unicode support, test code (`#923`_, `#967`_, `#1072`_) @@ -460,10 +589,10 @@ Release 1.7.0 (2010-06-18) --------------------------- +'''''''''''''''''''''''''' New Features -'''''''''''' +------------ - SFTP support (`#1037`_) Your Tahoe-LAFS gateway now acts like a full-fledged SFTP server. It @@ -502,7 +631,7 @@ uploaded). See the `architecture.rst`_ document [3] for details. Bugfixes and Improvements -''''''''''''''''''''''''' +------------------------- - Premature abort of upload if some shares were already present and some servers fail. (`#608`_) @@ -533,14 +662,14 @@ `#1024`_, `#1082`_) Removals -'''''''' +-------- - The 'tahoe debug consolidate' subcommand (for converting old allmydata Windows client backups to a newer format) has been removed. Dependency Updates -'''''''''''''''''' +------------------ - the Python version dependency is raised to 2.4.4 in some cases (2.4.3 for Redhat-based Linux distributions, 2.4.2 for UCS-2 builds) @@ -576,10 +705,10 @@ .. _FTP-and-SFTP.rst: docs/frontends/FTP-and-SFTP.rst Release 1.6.1 (2010-02-27) --------------------------- +'''''''''''''''''''''''''' Bugfixes -'''''''' +-------- - Correct handling of Small Immutable Directories @@ -590,7 +719,7 @@ (`#948`_) Usability Improvements -'''''''''''''''''''''' +---------------------- - Improved user interface messages and error reporting. (`#681`_, `#837`_, `#939`_) @@ -600,10 +729,10 @@ are retained for a further day. (`#577`_) Release 1.6.0 (2010-02-01) --------------------------- +'''''''''''''''''''''''''' New Features -'''''''''''' +------------ - Immutable Directories @@ -740,7 +869,7 @@ heterogeneous servers or geographical dispersion. Minor Changes -''''''''''''' +------------- - The webapi acquired a new "t=mkdir-with-children" command, to create and populate a directory in a single call. This is significantly @@ -829,10 +958,10 @@ .. 
_webapi.rst: docs/frontends/webapi.rst Release 1.5.0 (2009-08-01) --------------------------- +'''''''''''''''''''''''''' Improvements -'''''''''''' +------------ - Uploads of immutable files now use pipelined writes, improving upload speed slightly (10%) over high-latency connections. (`#392`_) @@ -870,7 +999,7 @@ read or written. Also they cannot generally be copied. (`#683`_) Bugfixes -'''''''' +-------- - deep-check-and-repair now tolerates read-only directories, such as the ones produced by the "tahoe backup" CLI command. Read-only @@ -889,7 +1018,7 @@ partial-information-guessing attack. (`#722`_) Platform/packaging changes -'''''''''''''''''''''''''' +-------------------------- - Tahoe-LAFS now runs on NetBSD, OpenBSD, ArchLinux, and NixOS, and on an embedded system based on an ARM CPU running at 266 MHz. @@ -918,7 +1047,7 @@ architectures. dependency updates -'''''''''''''''''' +------------------ - foolscap-0.4.1 - no python-2.4.0 or 2.4.1 (2.4.2 is good) (they contained a bug in base64.b32decode) @@ -938,10 +1067,10 @@ .. _#752: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/752 Release 1.4.1 (2009-04-13) --------------------------- +'''''''''''''''''''''''''' Garbage Collection -'''''''''''''''''' +------------------ - The big feature for this release is the implementation of garbage collection, allowing Tahoe storage servers to delete shares for old @@ -965,7 +1094,7 @@ crawl. 1.1.0 servers did not have the add-lease operation at all. Security/Usability Problems Fixed -''''''''''''''''''''''''''''''''' +--------------------------------- - A super-linear algorithm in the Merkle Tree code was fixed, which previously caused e.g. download of a 10GB file to take several hours @@ -982,7 +1111,7 @@ hashing the two strings to be compared with a random secret. webapi changes -'''''''''''''' +-------------- - In most cases, HTML tracebacks will only be sent if an "Accept: text/html" header was provided with the HTTP request. This will @@ -1018,7 +1147,7 @@ docs/proposed/lossmodel.lyx . CLI changes -''''''''''' +----------- - "tahoe check" and "tahoe deep-check" now accept an "--add-lease" argument, to update a lease on all shares. This is the "mark" side @@ -1060,13 +1189,13 @@ the results of a "consolidation" operation. other fixes -''''''''''' +----------- - The codebase no longer rauses RuntimeError as a kind of assert(). Specific exception classes were created for each previous instance of RuntimeError. - -Many unit tests were changed to use a non-network test harness, +- Many unit tests were changed to use a non-network test harness, speeding them up considerably. - Deep-traversal operations (manifest and deep-check) now walk @@ -1083,10 +1212,10 @@ .. _garbage-collection.rst: docs/garbage-collection.rst Release 1.3.0 (2009-02-13) --------------------------- +'''''''''''''''''''''''''' Checker/Verifier/Repairer -''''''''''''''''''''''''' +------------------------- - The primary focus of this release has been writing a checker / verifier / repairer for files and directories. "Checking" is the @@ -1165,7 +1294,7 @@ details. Efficient Backup -'''''''''''''''' +---------------- - The "tahoe backup" command is new in this release, which creates efficient versioned backups of a local directory. Given a local @@ -1188,7 +1317,7 @@ $target/Latest) from working. Large Files -''''''''''' +----------- - The 12GiB (approximate) immutable-file-size limitation is lifted. 
This release knows how to handle so-called "v2 immutable @@ -1204,7 +1333,7 @@ upload shares of a large file to a server which doesn't support it. FTP/SFTP Server -''''''''''''''' +--------------- - Tahoe now includes experimental FTP and SFTP servers. When configured with a suitable method to translate username+password @@ -1218,7 +1347,7 @@ configuration details. (`#512`_, `#531`_) CLI Changes -''''''''''' +----------- - This release adds the 'tahoe create-alias' command, which is a combination of 'tahoe mkdir' and 'tahoe add-alias'. This also allows @@ -1266,7 +1395,7 @@ https://tahoe-lafs.org/trac/tahoe-lafs/ticket/565 for details. Web changes -''''''''''' +----------- - The "default webapi port", used when creating a new client node (and in the getting-started documentation), was changed from 8123 to @@ -1354,7 +1483,7 @@ target) of a t=rename command. Packaging -''''''''' +--------- - Tahoe's dependencies have been extended to require the "[secure_connections]" feature from Foolscap, which will cause @@ -1418,7 +1547,7 @@ is no longer the case in 2.0.x . Grid Management Tools -''''''''''''''''''''' +--------------------- - Several tools have been added or updated in the misc/ directory, mostly munin plugins that can be used to monitor a storage grid. @@ -1456,7 +1585,7 @@ Tahoe implements some form of garbage collection. Configuration Changes: single INI-format tahoe.cfg file -''''''''''''''''''''''''''''''''''''''''''''''''''''''' +------------------------------------------------------- - The Tahoe node is now configured with a single INI-format file, named "tahoe.cfg", in the node's base directory. Most of the @@ -1487,7 +1616,7 @@ prevent the growth of existing mutable shares). Other Changes -''''''''''''' +------------- - Clients now declare which versions of the protocols they support. This is part of a new backwards-compatibility system: @@ -1532,10 +1661,10 @@ .. _#531: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/531 Release 1.2.0 (2008-07-21) --------------------------- +'''''''''''''''''''''''''' Security -'''''''' +-------- - This release makes the immutable-file "ciphertext hash tree" mandatory. Previous releases allowed the uploader to decide whether @@ -1555,7 +1684,7 @@ their shares. Dependencies -'''''''''''' +------------ - Tahoe now requires Foolscap-0.2.9 . It also requires pycryptopp 0.5 or newer, since earlier versions had a bug that interacted with @@ -1565,7 +1694,7 @@ when necessary. Web API -''''''' +------- - Web API directory pages should now contain properly-slash-terminated links to other directories. They have also stopped using absolute @@ -1595,7 +1724,7 @@ work correctly. Checker/Verifier/Repairer -''''''''''''''''''''''''' +------------------------- - Tahoe is slowly acquiring convenient tools to check up on file health, examine existing shares for errors, and repair files that @@ -1610,7 +1739,7 @@ Future releases will improve access to this functionality. Operations/Packaging -'''''''''''''''''''' +-------------------- - A "check-grid" script has been added, along with a Makefile target. This is intended (with the help of a pre-configured node @@ -1636,7 +1765,7 @@ added to match. Other -''''' +----- - Tahoe nodes now use Foolscap "incident logging" to record unusual events to their NODEDIR/logs/incidents/ directory. These incident @@ -1663,10 +1792,10 @@ .. 
_debian.rst: docs/debian.rst Release 1.1.0 (2008-06-11) --------------------------- +'''''''''''''''''''''''''' CLI: new "alias" model -'''''''''''''''''''''' +---------------------- - The new CLI code uses an scp/rsync -like interface, in which directories in the Tahoe storage grid are referenced by a @@ -1681,7 +1810,7 @@ 'ls' command. Please read `CLI.rst`_ for complete details. wapi: new pages, new commands -''''''''''''''''''''''''''''' +----------------------------- - Several new pages were added to the web API: @@ -1721,14 +1850,14 @@ - tahoe_spacetime New Dependencies -'''''''''''''''' +---------------- - zfec 1.1.0 - foolscap 0.2.8 - pycryptopp 0.5 - setuptools (now required at runtime) New Mutable-File Code -''''''''''''''''''''' +--------------------- - The mutable-file handling code (mostly used for directories) has been completely rewritten. The new scheme has a better API (with a @@ -1745,7 +1874,7 @@ published when in fact it failed. other features -'''''''''''''' +-------------- - The node now monitors its own CPU usage, as a percentage, measured every 60 seconds. 1/5/15 minute moving averages are available on the diff -Nru tahoe-lafs-1.9.2/PKG-INFO tahoe-lafs-1.10.0/PKG-INFO --- tahoe-lafs-1.9.2/PKG-INFO 2012-07-03 18:51:11.000000000 +0000 +++ tahoe-lafs-1.10.0/PKG-INFO 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: allmydata-tahoe -Version: 1.9.2 +Version: 1.10.0 Summary: secure, decentralized, fault-tolerant filesystem Home-page: https://tahoe-lafs.org/ Author: the Tahoe-LAFS project @@ -20,7 +20,7 @@ LICENCE ======= - Copyright 2006-2012 The Tahoe-LAFS Software Foundation + Copyright 2006-2013 The Tahoe-LAFS Software Foundation You may use this package under the GNU General Public License, version 2 or, at your option, any later version. You may use this package under the Transitive @@ -32,9 +32,9 @@ See `TGPPL.PDF`_ for why the TGPPL exists, graphically illustrated on three slides. - .. _quickstart.rst: https://tahoe-lafs.org/source/tahoe-lafs/trunk/docs/quickstart.rst - .. _COPYING.GPL: https://tahoe-lafs.org/trac/tahoe-lafs/browser/COPYING.GPL - .. _COPYING.TGPPL.rst: https://tahoe-lafs.org/trac/tahoe-lafs/browser/COPYING.TGPPL.rst + .. _quickstart.rst: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/quickstart.rst + .. _COPYING.GPL: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/COPYING.GPL + .. _COPYING.TGPPL.rst: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/COPYING.TGPPL.rst .. _TGPPL.PDF: https://tahoe-lafs.org/~zooko/tgppl.pdf Platform: UNKNOWN diff -Nru tahoe-lafs-1.9.2/README.txt tahoe-lafs-1.10.0/README.txt --- tahoe-lafs-1.9.2/README.txt 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/README.txt 2013-09-03 15:38:27.000000000 +0000 @@ -12,7 +12,7 @@ LICENCE ======= -Copyright 2006-2012 The Tahoe-LAFS Software Foundation +Copyright 2006-2013 The Tahoe-LAFS Software Foundation You may use this package under the GNU General Public License, version 2 or, at your option, any later version. You may use this package under the Transitive @@ -24,7 +24,7 @@ See `TGPPL.PDF`_ for why the TGPPL exists, graphically illustrated on three slides. -.. _quickstart.rst: https://tahoe-lafs.org/source/tahoe-lafs/trunk/docs/quickstart.rst -.. _COPYING.GPL: https://tahoe-lafs.org/trac/tahoe-lafs/browser/COPYING.GPL -.. _COPYING.TGPPL.rst: https://tahoe-lafs.org/trac/tahoe-lafs/browser/COPYING.TGPPL.rst +.. _quickstart.rst: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/quickstart.rst +.. 
_COPYING.GPL: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/COPYING.GPL +.. _COPYING.TGPPL.rst: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/COPYING.TGPPL.rst .. _TGPPL.PDF: https://tahoe-lafs.org/~zooko/tgppl.pdf diff -Nru tahoe-lafs-1.9.2/bin/tahoe-script.template tahoe-lafs-1.10.0/bin/tahoe-script.template --- tahoe-lafs-1.9.2/bin/tahoe-script.template 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/bin/tahoe-script.template 2013-09-03 15:38:27.000000000 +0000 @@ -1,7 +1,7 @@ #!/bin/false # You must specify a python interpreter. -u"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.4.4 and 2.7.x inclusive." +import sys; assert sys.version_info < (3,), ur"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.6 and 2.7.x inclusive." -import sys, os, subprocess +import os, subprocess where = os.path.realpath(sys.argv[0]) base = os.path.dirname(os.path.dirname(where)) @@ -21,13 +21,13 @@ # look for Tahoe.home . homemarker = os.path.join(base, "Tahoe.home") if not os.path.exists(homemarker): - print whoami - print '''\ + print(whoami) + print('''\ I just tried to run and found that I am not living in such a directory, so I am stopping now. To run Tahoe after it has been is installed, please execute my brother, who gets installed into the appropriate place for executables when you run "make install" (perhaps as "%s"). -''' % (perhaps_installed_tahoe,) +''' % (perhaps_installed_tahoe,)) sys.exit(1) # we've found our home. Put the tahoe support/lib etc. in our PYTHONPATH. @@ -63,7 +63,7 @@ # Note that this doesn't escape \x7F. If it did, test_unicode_arguments_and_output # in test_runner.py wouldn't work. def mangle(s): - return str(re.sub(ur'[^\x20-\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s)) + return str(re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s)) argv = [mangle(argv_unicode[i]) for i in xrange(0, argc.value)] @@ -104,25 +104,25 @@ command = prefix + [script] + args if not os.path.exists(script): - print whoami - print '''\ + print(whoami) + print('''\ I could not find the support script "%s". To run an installed version of Tahoe-LAFS, please execute the "tahoe" script that is installed into the appropriate place for executables when you run "python setup.py install" (perhaps as "%s"). -''' % (script, perhaps_installed_tahoe) +''' % (script, perhaps_installed_tahoe)) sys.exit(1) try: res = subprocess.call(command, env=os.environ) -except Exception, le: - print whoami - print '''\ +except Exception as le: + print(whoami) + print('''\ I just tried to invoke "%s" and got an exception. -''' % (runner,) +''' % (runner,)) raise else: sys.exit(res) diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/PKG-INFO tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/PKG-INFO --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/PKG-INFO 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -Metadata-Version: 1.0 -Name: darcsver -Version: 1.7.2 -Summary: generate a version number from darcs history -Home-page: http://tahoe-lafs.org/trac/darcsver -Author: Zooko O'Whielacronx -Author-email: zooko@zooko.com -License: BSD -Description: darcsver - generate version numbers from darcs revision control history - ======================================================================= - - What Does It Do - --------------- - - Create files containing version numbers, based upon the latest darcs - release tag. 
- - If your source tree is coming from darcs (i.e. it is in a darcs - repository), this tool will determine the most recent release tag, - count the patches that have been applied since then, and compute a - version number to be written into _version.py (and optionally other - version files). This version number will be available by doing: - - from your_package_name import __version__ - - Source trees that do not come from darcs (e.g. release tarballs, nightly - tarballs) and are not within a darcs repository should instead, come with a - _version.py that was generated before the tarball was produced. In this case, - this tool will quietly exit without modifying the existing _version.py . - - 'release tags' are tags in the source repository that match the following - regexp: - - ^your_package_name-(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c|rc)(\d+))? - - - Installation - ------------ - - With easy_install: - - easy_install darcsver - - Alternative manual installation: - - tar -zxvf darcsver-X.Y.Z.tar.gz - cd darcsver-X.Y.Z - python setup.py install - - Where X.Y.Z is a version number. - - Alternative to make a specific package use darcsver without installing - darcsver into the system: - - Put "setup_requires=['darcsver']" in the call to setup() in the - package's setup.py file. - - - Usage - ----- - - There are two ways to use this: the command-line tool and the - setuptools plugin. - - To use the command-line tool, execute it as: - - darcsver $PACKAGE_NAME $PATH_TO_VERSION_PY - - - To use the setuptools plugin (which enables you to write "./setup.py - darcsver" and which cleverly figures out where the _version.py file - ought to go), you must first package your python module with - `setup.py` and use setuptools. - - The former is well documented in the distutils manual: - - http://docs.python.org/dist/dist.html - - To use setuptools instead of distutils, just edit `setup.py` and - change - - from distutils.core import setup - - to - - from setuptools import setup - - - References - ---------- - - How to distribute Python modules with Distutils: - - http://docs.python.org/dist/dist.html - - - Setuptools complete manual: - - http://peak.telecommunity.com/DevCenter/setuptools - - - Thanks to Yannick Gingras for providing the prototype for this - README.txt. 
- -Keywords: distutils setuptools plugin setup darcs -Platform: UNKNOWN -Classifier: Framework :: Setuptools Plugin -Classifier: Development Status :: 5 - Production/Stable -Classifier: License :: OSI Approved :: BSD License -Classifier: License :: DFSG approved -Classifier: Intended Audience :: Developers -Classifier: Operating System :: OS Independent -Classifier: Natural Language :: English -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.4 -Classifier: Programming Language :: Python :: 2.5 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Topic :: Utilities -Classifier: Topic :: Software Development :: Libraries diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/SOURCES.txt tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/SOURCES.txt --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/SOURCES.txt 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -README.txt -setup.py -darcsver/__init__.py -darcsver/_version.py -darcsver/darcsvermodule.py -darcsver/setuptools_command.py -darcsver.egg-info/PKG-INFO -darcsver.egg-info/SOURCES.txt -darcsver.egg-info/dependency_links.txt -darcsver.egg-info/entry_points.txt -darcsver.egg-info/not-zip-safe -darcsver.egg-info/top_level.txt -darcsver/test/__init__.py -darcsver/test/test_darcsver.py -scripts/__init__.py -scripts/darcsverscript.py \ No newline at end of file diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/dependency_links.txt tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/dependency_links.txt --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/dependency_links.txt 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/dependency_links.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/entry_points.txt tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/entry_points.txt --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/entry_points.txt 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -[distutils.setup_keywords] -versionbodies = darcsver.setuptools_command:validate_versionbodies -versionfiles = darcsver.setuptools_command:validate_versionfiles - -[console_scripts] -darcsver = scripts.darcsverscript:main - -[distutils.commands] -darcsver = darcsver.setuptools_command:DarcsVer - diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/not-zip-safe tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/not-zip-safe --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/not-zip-safe 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/not-zip-safe 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/top_level.txt tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/top_level.txt --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/EGG-INFO/top_level.txt 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/EGG-INFO/top_level.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -darcsver -scripts diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/__init__.py tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/__init__.py --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/__init__.py 2012-05-14 02:07:11.000000000 +0000 +++ 
tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -__version__ = "unknown" -try: - from _version import __version__ -except ImportError: - # We're running in a tree that hasn't run darcsver, and didn't come with a - # _version.py, so we don't know what our version is. This should not happen - # very often. - pass diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/_version.py tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/_version.py --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/_version.py 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/_version.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ - -# This is the version of this tree, as created by setup.py darcsver from the darcs patch -# information: the main version number is taken from the most recent release -# tag. If some patches have been added since the last release, this will have a -# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see -# pyutil.version_class for a description of what the different fields mean. - -__pkgname__ = "darcsver" -verstr = "1.7.2" -try: - from pyutil.version_class import Version as pyutil_Version - __version__ = pyutil_Version(verstr) -except (ImportError, ValueError): - # Maybe there is no pyutil installed, or this may be an older version of - # pyutil.version_class which does not support SVN-alike revision numbers. - from distutils.version import LooseVersion as distutils_Version - __version__ = distutils_Version(verstr) diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/darcsvermodule.py tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/darcsvermodule.py --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/darcsvermodule.py 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/darcsvermodule.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,200 +0,0 @@ -import os, string, sys, re -import xml.dom.minidom -import subprocess -PIPE=subprocess.PIPE -from distutils import log - -def all(iterable): - for thing in iterable: - if not thing: - return False - return True - -OUR_VERSION_BASE_RE_STR="(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c)(\d+))?(\.dev(\d+))?" -try: - # If we can import pyutil.version_class then use its regex. - from pyutil import version_class - VERSION_BASE_RE_STR = version_class.VERSION_BASE_RE_STR -except (ImportError, AttributeError): - # Else (perhaps a bootstrapping problem),then we'll use this - # regex, which was copied from the pyutil source code on - # 2010-09-02. - VERSION_BASE_RE_STR=OUR_VERSION_BASE_RE_STR - -def get_text(nodelist): - rc = "" - for node in nodelist: - if node.nodeType == node.TEXT_NODE: - rc = rc + node.data - return rc - -VERSION_BODY = ''' -# This is the version of this tree, as created by %(versiontool)s from the darcs patch -# information: the main version number is taken from the most recent release -# tag. If some patches have been added since the last release, this will have a -# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see -# pyutil.version_class for a description of what the different fields mean. - -__pkgname__ = "%(pkgname)s" -verstr = "%(pkgversion)s" -try: - from pyutil.version_class import Version as pyutil_Version - __version__ = pyutil_Version(verstr) -except (ImportError, ValueError): - # Maybe there is no pyutil installed. 
- from distutils.version import LooseVersion as distutils_Version - __version__ = distutils_Version(verstr) -''' - -def write_version_py(verstr, outfname, EXE_NAME, version_body, pkgname): - f = open(outfname, "wb+") - f.write(version_body % { - 'versiontool': EXE_NAME, - 'pkgversion': verstr, - 'pkgname': pkgname, - }) - f.close() - -def read_version_py(infname): - try: - verstrline = open(infname, "rt").read() - except EnvironmentError: - return None - else: - VSRE = r"^verstr = ['\"]([^'\"]*)['\"]" - mo = re.search(VSRE, verstrline, re.M) - if mo: - return mo.group(1) - -def update(pkgname, verfilename, revision_number=False, loud=False, abort_if_snapshot=False, EXE_NAME="darcsver", version_body=VERSION_BODY): - """ - @param revision_number If true, count the total number of patches in all - history. If false, count the total number of patches since the most recent - release tag. - - Returns a tuple of (exit code, new version string). - """ - if isinstance(verfilename, basestring): - verfilenames = [verfilename] - else: - verfilenames = verfilename - assert all([isinstance(vfn, basestring) for vfn in verfilenames]), [vfn for vfn in verfilenames if not isinstance(vfn, basestring)] - if isinstance(version_body, basestring): - verbodies = [version_body] - else: - verbodies = version_body - rc = -1 - - # First we try "darcs query repo" because if that fails then we - # won't try "darcs changes" at all, because "darcs changes" emits - # an ugly error message when run in not-a-repo. - try: - p = subprocess.Popen(["darcs", 'query', 'repo'], stdout=PIPE, stderr=PIPE, universal_newlines=True) - except OSError, ose: - if ose.errno == 2 and '~' in os.environ['PATH']: - expanded_path = os.environ['PATH'].replace('~', os.path.expanduser('~')) - msg = ("WARNING: 'darcs' was not found. However '~' was found in your PATH. \n" - "Please note that bugs in python cause it to fail to traverse '~' in \n" - "the user's PATH. Please fix your path, e.g. \nPATH=%s" ) - log.warn(msg % (expanded_path,)) - pass - else: - (output, errput) = p.communicate() - rc = p.returncode - - if rc == 0: - cmd = ["changes", "--xml-output"] - if not revision_number: - cmd.append("--from-tag=^%s" % (pkgname,)) - try: - p = subprocess.Popen(["darcs"] + cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True) - except OSError: - pass - else: - (output, errput) = p.communicate() - rc = p.returncode - if rc != 0 and errput: - log.info("%s: darcs wrote to stderr: '%s'" % (EXE_NAME, errput,)) - errput = None - else: - if all([os.path.exists(vfn) for vfn in verfilenames]): - log.info("%s: using extant version file %s" % (EXE_NAME, verfilenames)) - return (0, read_version_py(verfilenames[0])) - else: - log.warn("%s: didn't find version tags with darcs, and %s don't exist." % (EXE_NAME, verfilenames)) - return (rc, None) - - # Filter out bad chars that can cause the XML parser to give up in despair. - # (Thanks to lelit of the tailor project and ndurner and warner for this hack.) 
- allbadchars = "".join([chr(i) for i in range(0x0a) + [0x0b, 0x0c] + range(0x0e, 0x20) + range(0x7f,0x100)]) - tt = string.maketrans(allbadchars, "-"*len(allbadchars)) - output = output.translate(tt) - regexstr = "^TAG %s-(%s)$" % (pkgname, VERSION_BASE_RE_STR) - last_tag = None - - # strip off trailing warning messages that darcs 2.3.1 writes to stdout - endi = output.find("")+len("") - if endi != -1: - output = output[:endi] - try: - doc = xml.dom.minidom.parseString(output) - except xml.parsers.expat.ExpatError: - # Okay maybe this is an error message instead of an XML output. - pass - else: - changelog = doc.getElementsByTagName("changelog")[0] - patches = changelog.getElementsByTagName("patch") - version_re = re.compile(regexstr) - count_since_last_patch = 0 - if abort_if_snapshot: - for patch in patches: - name = get_text(patch.getElementsByTagName("name")[0].childNodes) - m = version_re.match(name) - if m: - last_tag = m.group(1) - last_tag = last_tag.encode("utf-8") - break - else: - sys.exit(0) # because abort_if_snapshot - else: - for patch in patches: - name = get_text(patch.getElementsByTagName("name")[0].childNodes) - m = version_re.match(name) - if m: - last_tag = m.group(1) - last_tag = last_tag.encode("utf-8") - break - else: - count_since_last_patch += 1 - - if not last_tag: - if errput: - log.info("%s: darcs wrote to stderr: '%s'" % (EXE_NAME, errput,)) - errput = None - assert all([isinstance(vfn, basestring) for vfn in verfilenames]), [vfn for vfn in verfilenames if not isinstance(vfn, basestring)] - if all([os.path.exists(vfn) for vfn in verfilenames]): - log.warn("%s: I'm unable to find a tag in the darcs history matching \"%s\", so I'm leaving %s alone." % (EXE_NAME, regexstr, verfilenames,)) - return (0, read_version_py(verfilenames[0])) - else: - log.warn("%s: I'm unable to find a tag in the darcs history matching \"%s\", and %s don't exist." 
% (EXE_NAME, regexstr, verfilenames,)) - return (-1, None) - - if revision_number: - if count_since_last_patch: - # this is an interim version - verstr = "%s-r%d" % (last_tag, len(patches)) - else: - # this is a release - verstr = last_tag - else: - if count_since_last_patch: - # this is an interim version - verstr = "%s-%d" % (last_tag, count_since_last_patch) - else: - # this is a release - verstr = last_tag - - for verfn, verbod in zip(verfilenames, verbodies): - write_version_py(verstr, verfn, EXE_NAME, verbod, pkgname) - log.info("%s: wrote '%s' into %s" % (EXE_NAME, verstr, verfn,)) - return (0, verstr) diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/setuptools_command.py tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/setuptools_command.py --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/darcsver/setuptools_command.py 2012-05-14 02:07:11.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/darcsver/setuptools_command.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -import os - -import setuptools - -from darcsver import darcsvermodule - -from distutils.errors import DistutilsSetupError - -def validate_string_or_iter_of_strings(dist, attr, value): - # value is required to be a string or else a list of strings - if isinstance(value, basestring): - return - try: - for thing in value: - if not isinstance(thing, basestring): - raise DistutilsSetupError("%r is required to be a string or an iterable of strings (got %r)" % (attr, value)) - except TypeError: - raise DistutilsSetupError("%r is required to be a string or an iterable of strings (got %r)" % (attr, value)) - -def validate_versionfiles(dist, attr, value): - return validate_string_or_iter_of_strings(dist, attr, value) - -def validate_versionbodies(dist, attr, value): - return validate_string_or_iter_of_strings(dist, attr, value) - -def all(iterator): - for thing in iterator: - if not thing: - return False - return True - -PYTHON_VERSION_BODY=''' -# This is the version of this tree, as created by %(versiontool)s from the darcs patch -# information: the main version number is taken from the most recent release -# tag. If some patches have been added since the last release, this will have a -# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see -# pyutil.version_class for a description of what the different fields mean. - -__pkgname__ = "%(pkgname)s" -verstr = "%(pkgversion)s" -try: - from pyutil.version_class import Version as pyutil_Version - __version__ = pyutil_Version(verstr) -except (ImportError, ValueError): - # Maybe there is no pyutil installed, or this may be an older version of - # pyutil.version_class which does not support SVN-alike revision numbers. - from distutils.version import LooseVersion as distutils_Version - __version__ = distutils_Version(verstr) -''' - -class DarcsVer(setuptools.Command): - description = "generate a version number from darcs history" - user_options = [ - ('project-name', None, "name of the project as it appears in the project's release tags (default's the to the distribution name)"), - ('filename', None, "path to file into which the version number should be written (defaults to the package directory's _version.py)"), - ('count-all-patches', None, "If true, count the total number of patches in all history. 
If false, count the total number of patches since the most recent release tag."), - ('abort-if-snapshot', None, "If true, the if the current version is a snapshot (not a release tag), then immediately exit the process with exit code 0."), - ] - - def initialize_options(self): - self.project_name = None - self.filename = None - self.count_all_patches = None - self.abort_if_snapshot = None - - def finalize_options(self): - if self.project_name is None: - self.project_name = self.distribution.get_name() - - # If the user passed --filename on the cmdline, override - # the setup.py's versionfiles argument. - if self.filename is not None: - if not isinstance(self.filename, basestring): - raise TypeError("filename is required to be a string, not %s, filename: %s" % (type(self.filename), self.filename)) - self.distribution.versionfiles = [self.filename] - - if self.abort_if_snapshot is None: - self.abort_if_snapshot=False - - def run(self): - if self.distribution.versionfiles is None: - toppackage = '' - # If there is a package with the same name as the project name and - # there is a directory by that name then use that. - packagedir = None - if self.distribution.packages and self.project_name in self.distribution.packages: - toppackage = self.project_name - srcdir = '' - if self.distribution.package_dir: - srcdir = self.distribution.package_dir.get(toppackage) - if not srcdir is None: - srcdir = self.distribution.package_dir.get('', '') - packagedir = os.path.join(srcdir, toppackage) - - if packagedir is None or not os.path.isdir(packagedir): - # Else, if there is a singly-rooted tree of packages, use the - # root of that. - if self.distribution.packages: - for package in self.distribution.packages: - if not toppackage: - toppackage = package - else: - if toppackage.startswith(package+"."): - toppackage = package - else: - if not package.startswith(toppackage+"."): - # Not singly-rooted - toppackage = '' - break - - srcdir = '' - if self.distribution.package_dir: - srcdir = self.distribution.package_dir.get(toppackage) - if srcdir is None: - srcdir = self.distribution.package_dir.get('', '') - packagedir = os.path.join(srcdir, toppackage) - - self.distribution.versionfiles = [os.path.join(packagedir, '_version.py')] - - if self.distribution.versionbodies is None: - self.distribution.versionbodies = [PYTHON_VERSION_BODY] - - assert all([isinstance(vfn, basestring) for vfn in self.distribution.versionfiles]), self.distribution.versionfiles - (rc, verstr) = darcsvermodule.update(self.project_name, self.distribution.versionfiles, self.count_all_patches, abort_if_snapshot=self.abort_if_snapshot, EXE_NAME="setup.py darcsver", version_body=self.distribution.versionbodies) - if rc == 0: - self.distribution.metadata.version = verstr diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/scripts/darcsverscript.py tahoe-lafs-1.10.0/darcsver-1.7.2.egg/scripts/darcsverscript.py --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/scripts/darcsverscript.py 2012-05-14 02:07:12.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/scripts/darcsverscript.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python - -from optparse import OptionParser -import os, sys - -from darcsver import darcsvermodule, __version__ - -try: - EXE_NAME=os.path.basename(sys.argv[0]) -except: - EXE_NAME="darcsver" - -def main(): - parser = OptionParser(usage="Usage: %prog [options] [pkgname [verfilename]]", - version="%prog " + str(__version__), - prog=EXE_NAME) - parser.add_option("-q", "--quiet", default=False, action="store_true", - help="Be quiet, do the job without any output.") - parser.add_option("--count-all-patches", "--revision-number", default=False, - action="store_true", dest="count_all_patches", - help="By default %s counts the number of patches since the " - "most recent release tag. With this option, it counts " - "all the patches in the repository." % EXE_NAME) - - options, args = parser.parse_args() - - if args: - pkgname = args.pop(0) - else: - pkgname = os.path.basename(os.getcwd()) - if not options.quiet: - print "%s: You didn't pass a pkg-name on the command-line, so I'm going to take the name of the current working directory: \"%s\"" % (EXE_NAME, pkgname,) - - if args: - verfilename = args.pop(0) - else: - verfilename = os.path.join(pkgname, "_version.py") - if not options.quiet: - print "%s: You didn't pass a verfilename on the command-line, so I'm going to build one from the name of the package: \"%s\"" % (EXE_NAME, verfilename,) - - (rc, newverstr) = darcsvermodule.update(pkgname=pkgname, verfilename=verfilename, revision_number=options.count_all_patches, quiet=options.quiet, EXE_NAME=EXE_NAME) - return rc - -if __name__ == "__main__": - rc = main() - sys.exit(rc) diff -Nru tahoe-lafs-1.9.2/darcsver-1.7.2.egg/share/doc/python-darcsver/README.txt tahoe-lafs-1.10.0/darcsver-1.7.2.egg/share/doc/python-darcsver/README.txt --- tahoe-lafs-1.9.2/darcsver-1.7.2.egg/share/doc/python-darcsver/README.txt 2012-05-14 02:07:12.000000000 +0000 +++ tahoe-lafs-1.10.0/darcsver-1.7.2.egg/share/doc/python-darcsver/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -darcsver - generate version numbers from darcs revision control history -======================================================================= - -What Does It Do ---------------- - -Create files containing version numbers, based upon the latest darcs -release tag. - -If your source tree is coming from darcs (i.e. it is in a darcs -repository), this tool will determine the most recent release tag, -count the patches that have been applied since then, and compute a -version number to be written into _version.py (and optionally other -version files). This version number will be available by doing: - - from your_package_name import __version__ - -Source trees that do not come from darcs (e.g. release tarballs, nightly -tarballs) and are not within a darcs repository should instead, come with a -_version.py that was generated before the tarball was produced. In this case, -this tool will quietly exit without modifying the existing _version.py . - -'release tags' are tags in the source repository that match the following -regexp: - - ^your_package_name-(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c|rc)(\d+))? - - -Installation ------------- - -With easy_install: - - easy_install darcsver - -Alternative manual installation: - - tar -zxvf darcsver-X.Y.Z.tar.gz - cd darcsver-X.Y.Z - python setup.py install - -Where X.Y.Z is a version number. - -Alternative to make a specific package use darcsver without installing -darcsver into the system: - - Put "setup_requires=['darcsver']" in the call to setup() in the - package's setup.py file. 
- - -Usage ----- - -There are two ways to use this: the command-line tool and the -setuptools plugin. - -To use the command-line tool, execute it as: - -darcsver $PACKAGE_NAME $PATH_TO_VERSION_PY - - -To use the setuptools plugin (which enables you to write "./setup.py -darcsver" and which cleverly figures out where the _version.py file -ought to go), you must first package your python module with -`setup.py` and use setuptools. - -The former is well documented in the distutils manual: - - http://docs.python.org/dist/dist.html - -To use setuptools instead of distutils, just edit `setup.py` and -change - - from distutils.core import setup - -to - - from setuptools import setup - - -References ----------- - -How to distribute Python modules with Distutils: - - http://docs.python.org/dist/dist.html - - -Setuptools complete manual: - - http://peak.telecommunity.com/DevCenter/setuptools - - -Thanks to Yannick Gingras for providing the prototype for this -README.txt. diff -Nru tahoe-lafs-1.9.2/debian/README.Debian tahoe-lafs-1.10.0/debian/README.Debian --- tahoe-lafs-1.9.2/debian/README.Debian 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/README.Debian 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,48 @@ + +# Using Tahoe-LAFS in Debian +============================ + +Since package version 1.10.0-1, tahoe-lafs is shipped with an initscript that +helps sysadmins manage the nodes installed on the system. + +To be used, this initscript requires that each tahoe node directory be stored +under `/var/lib/tahoe-lafs/`. + +The initscript scans /var/lib/tahoe-lafs/ to find node configurations, and uses +each node directory's owner as the uid of that node's tahoe process. This +provides a bit more isolation between nodes. + +To configure a node managed by this initscript, use these steps (as +root): + +Set up some helpful variables first. Don't use blank spaces in the node nick, +otherwise the initscript will fail. + + # export NICK="nickname" + # export NODE_USER="fillinausername" + # export BASEDIR="/var/lib/tahoe-lafs/${NICK}" + +Add a system user: + + # adduser --system --home "${BASEDIR}" "${NODE_USER}" + +Create your node, using the tahoe `-d` option, e.g.: + + # tahoe -d "${BASEDIR}" create-client -n "${NICK}" + +Edit the configuration: + + # view "${BASEDIR}"/tahoe.cfg + +Fix permissions: + + # chown -R "${NODE_USER}":nogroup "${BASEDIR}" + +Then tighten them further: + + # find "${BASEDIR}" -type d -exec chmod 700 {} \; + # find "${BASEDIR}" -type f -exec chmod 600 {} \; + + +Use /etc/default/tahoe-lafs to configure the initscript and tell it which +nodes to start at boot time. diff -Nru tahoe-lafs-1.9.2/debian/changelog tahoe-lafs-1.10.0/debian/changelog --- tahoe-lafs-1.9.2/debian/changelog 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/changelog 2014-01-18 02:07:29.000000000 +0000 @@ -1,3 +1,16 @@ +tahoe-lafs (1.10.0-1) unstable; urgency=low + + [Bert Agaz] + * New upstream release. + * Add missing dependency on net-tools (Closes: #683331). + * Import upstream patch to support kFreeBSD (Closes: #700239). + * Standards version bump. + * Add sysvinit script (Closes: #652003). + * Include a README.Debian to document how to manage nodes in Debian. + * Use dh_installchangelogs to properly install upstream NEWS.rst.
+ + -- Micah Anderson Fri, 17 Jan 2014 21:07:24 -0500 + tahoe-lafs (1.9.2-1) unstable; urgency=low [ Bert Agaz ] diff -Nru tahoe-lafs-1.9.2/debian/control tahoe-lafs-1.10.0/debian/control --- tahoe-lafs-1.9.2/debian/control 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/control 2014-01-18 01:59:58.000000000 +0000 @@ -5,7 +5,7 @@ Uploaders: Micah Anderson Build-Depends: debhelper (>= 9), python-setuptools, python-all (>= 2.6.6-3~), python-twisted -Standards-Version: 3.9.3 +Standards-Version: 3.9.5 Vcs-Git: git://anonscm.debian.org/tahoe/tahoe.git Vcs-Browser: http://anonscm.debian.org/gitweb/?p=tahoe/tahoe.git Homepage: http://tahoe-lafs.org/trac/tahoe @@ -15,7 +15,7 @@ Depends: ${misc:Depends}, ${python:Depends}, python-pyasn1, python-crypto, python-foolscap (>= 0.6.3), python-openssl, python-nevow, python-simplejson, python-zfec, python-pycryptopp (>= 0.5.29), python-setuptools, python-mock, - python-twisted + python-twisted, net-tools Description: Secure distributed filesystem Tahoe, the Least Authority File System, is a distributed filesystem that features high reliability, strong security properties, and a fine-grained diff -Nru tahoe-lafs-1.9.2/debian/copyright tahoe-lafs-1.10.0/debian/copyright --- tahoe-lafs-1.9.2/debian/copyright 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/copyright 2014-01-18 01:59:58.000000000 +0000 @@ -4,30 +4,38 @@ Source: http://tahoe-lafs.org/ Files: * -Copyright: Copyright 2006-2012 AllMyData, Inc. +Copyright: Copyright 2006-2013 AllMyData, Inc. License: GPL-2+ or TGPPL1+ +Files: setuptools-0.6c16dev4.egg/* +Copyright: Copyright Phillip J. Eby +License: PSF or Zope + License: GPL-2+ - You may use this package under the GNU General Public License, version 2 or, at - your option, any later version. You may also opt to use this package under the - terms of the Transitive Grace Period Public License, version 1 or, at your - option, any later version. - . - On Debian GNU/Linux systems, the complete text of the GNU General - Public License can be found in `/usr/share/common-licenses/GPL-2'. - . - The Transitive Grace Period Public Licence has requirements similar to the GPL - except that it allows you to wait for up to twelve months after you redistribute - a derived work before releasing the source code of your derived work. See the - file COPYING.TGPPL.html for the terms of the Transitive Grace Period Public - Licence, version 1, also included below. - . - This licence also comes with the added permission that you may link this - program with the OpenSSL library and distribute executables, as long as you - follow the requirements of this licence in regard to all of the software in - the executable aside from OpenSSL. + You may use this package under the GNU General Public License, version + 2 or, at your option, any later version. + . + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 2 of the License, or (at your + option) any later version. + . + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + . 
+ You should have received a copy of the GNU General Public License along + with this package; if not, write to the Free Software Foundation, Inc., + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + . + On Debian systems, the full text of the GNU General Public License + version 2 can be found in the file `/usr/share/common-licenses/GPL-2'. License: TGPPL1+ + You may also opt to use this package under the terms of the Transitive Grace + Period Public License, version 1 or, at your option, any later version. + . This Transitive Grace Period Public Licence (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following licensing notice adjacent to the copyright notice for @@ -190,10 +198,6 @@ a notice of your own that is not confusingly similar to the notice in this License. -Files: setuptools-0.6c16dev3.egg/* -Copyright: Copyright Phillip J. Eby -License: PSF or Zope - License: PSF PSF LICENSE AGREEMENT FOR PYTHON 2.3 ------------------------------------ @@ -299,36 +303,3 @@ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Files: darcsver-1.7.2.egg/* -Copyright: Zooko O'Whielacronx -License: BSD - * Redistribution and use in source and binary forms, - * with or without modification, are permitted provided - * that the following conditions are met: - * - * Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * Neither the name of the copyright holder nor the names - * of any other contributors may be used to endorse or - * promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * OF SUCH DAMAGE. diff -Nru tahoe-lafs-1.9.2/debian/docs tahoe-lafs-1.10.0/debian/docs --- tahoe-lafs-1.9.2/debian/docs 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/docs 2014-01-18 01:59:58.000000000 +0000 @@ -1,3 +1,2 @@ README.txt docs/* -NEWS.rst diff -Nru tahoe-lafs-1.9.2/debian/patches/add_kFreeBSD_in_supported_os.diff tahoe-lafs-1.10.0/debian/patches/add_kFreeBSD_in_supported_os.diff --- tahoe-lafs-1.9.2/debian/patches/add_kFreeBSD_in_supported_os.diff 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/add_kFreeBSD_in_supported_os.diff 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,21 @@ +Description: Add kFreeBSD support for automatic IP detection. + This patch temporarily enables tahoe v1.10.0 to support kFreeBSD when trying + to guess the IP address. This is fixed in upstream trunk and this patch + should be removed when packaging tahoe-lafs v1.11.
+Forwarded: yes +Bug-Debian: http://bugs.debian.org/700239 +Bug: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1918 +Author: Gonéri Le Bouder +Index: tahoe.git/src/allmydata/util/iputil.py +=================================================================== +--- tahoe.git.orig/src/allmydata/util/iputil.py 2013-09-04 10:52:16.381111327 +0200 ++++ tahoe.git/src/allmydata/util/iputil.py 2013-09-04 13:13:55.697104935 +0200 +@@ -161,6 +161,8 @@ + "netbsd4": "bsd", + "netbsd5": "bsd", + "netbsd6": "bsd", ++ "gnukfreebsd8": "bsd", ++ "gnukfreebsd9": "bsd", + "sunos5": "sunos", + "cygwin": "cygwin", + } diff -Nru tahoe-lafs-1.9.2/debian/patches/create_proper_tahoe_script_from_template.diff tahoe-lafs-1.10.0/debian/patches/create_proper_tahoe_script_from_template.diff --- tahoe-lafs-1.9.2/debian/patches/create_proper_tahoe_script_from_template.diff 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/create_proper_tahoe_script_from_template.diff 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,143 @@ +Description: Create a proper bin/tahoe for Debian + Upstream used to patch the bin/tahoe during a dh_install override to create a + working tahoe script for the Debian package. Let's use a patch and keep the + debian/rules file cleaner. +Forwarded: not-needed +Origin: upstream, https://tahoe-lafs.org/trac/tahoe-lafs/changeset/623d82a1302ecdad33b61864de1ebaf34bb0d959 +Author: Brian Warner +Index: tahoe.git/bin/tahoe-script.template +=================================================================== +--- tahoe.git.orig/bin/tahoe-script.template 2013-10-08 14:24:02.141074127 +0200 ++++ tahoe.git/bin/tahoe-script.template 2013-10-10 14:16:27.509072780 +0200 +@@ -1,128 +1,4 @@ +-#!/bin/false # You must specify a python interpreter. +-import sys; assert sys.version_info < (3,), ur"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.6 and 2.7.x inclusive." ++#!/usr/bin/python + +-import os, subprocess +- +-where = os.path.realpath(sys.argv[0]) +-base = os.path.dirname(os.path.dirname(where)) +- +-if sys.platform == "win32": +- perhaps_installed_tahoe = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'tahoe.pyscript') +-else: +- perhaps_installed_tahoe = "/usr/bin/tahoe" +- +-whoami = '''\ +-I am a "bin%stahoe" executable for the convenience of running Tahoe-LAFS +-from its source distribution -- I work only when invoked as the "tahoe" +-script that lives in the "bin" subdirectory of a Tahoe source code +-distribution, and only if you have already run "python setup.py build". +-''' % (os.path.sep,) +- +-# look for Tahoe.home . +-homemarker = os.path.join(base, "Tahoe.home") +-if not os.path.exists(homemarker): +- print(whoami) +- print('''\ +-I just tried to run and found that I am not living in such a directory, so I +-am stopping now. To run Tahoe after it has been is installed, please execute +-my brother, who gets installed into the appropriate place for executables +-when you run "make install" (perhaps as "%s"). +-''' % (perhaps_installed_tahoe,)) +- sys.exit(1) +- +-# we've found our home. Put the tahoe support/lib etc. in our PYTHONPATH. 
+-if sys.platform == "win32": +- supportdir = os.path.join(base, "support", "Lib", "site-packages") +-else: +- supportdir = os.path.join(base, "support", +- "lib", +- "python%d.%d" % sys.version_info[:2], +- "site-packages") +- +-# update PYTHONPATH so that child processes (like twistd) will use this too +-pp = os.environ.get("PYTHONPATH") +-if pp: +- pp = os.pathsep.join([supportdir] + pp.split(os.pathsep)) +-else: +- pp = supportdir +-os.environ["PYTHONPATH"] = pp +- +-# find commandline args and the location of the tahoe executable. +-if sys.platform == "win32": +- import re +- from ctypes import WINFUNCTYPE, POINTER, byref, c_wchar_p, c_int, windll +- +- GetCommandLineW = WINFUNCTYPE(c_wchar_p)(("GetCommandLineW", windll.kernel32)) +- CommandLineToArgvW = WINFUNCTYPE(POINTER(c_wchar_p), c_wchar_p, POINTER(c_int)) \ +- (("CommandLineToArgvW", windll.shell32)) +- +- argc = c_int(0) +- argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) +- +- # See src/allmydata/scripts/runner.py for the corresponding unmangler. +- # Note that this doesn't escape \x7F. If it did, test_unicode_arguments_and_output +- # in test_runner.py wouldn't work. +- def mangle(s): +- return str(re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s)) +- +- argv = [mangle(argv_unicode[i]) for i in xrange(0, argc.value)] +- +- # Take only the suffix with the same number of arguments as sys.argv. +- # This accounts for anything that can cause initial arguments to be stripped, +- # for example, the Python interpreter or any options passed to it, or runner +- # scripts such as 'coverage run'. It works even if there are no such arguments, +- # as in the case of a frozen executable created by bb-freeze or similar. +- +- argv = argv[-len(sys.argv):] +- +- # On Windows, the script is not directly executable and must be run via python. +- prefix = [sys.executable] +- script = os.path.join(base, "support", "Scripts", "tahoe.pyscript") +- args = argv[1:] +-else: +- # On non-Windows, invoke the script directly, so that 'top' for example shows 'tahoe'. +- prefix = [] +- script = os.path.join(base, "support", "bin", "tahoe") +- args = sys.argv[1:] +- +-# Support indirection via another "runner" script (e.g. coverage). +-# For example: bin/tahoe @RUNNER RUNNER_ARGS @tahoe TAHOE_ARGS +- +-if len(args) >= 1 and args[0].startswith('@'): +- runner = args[0][1:] +- if runner.endswith('.py') or runner.endswith('.pyscript'): +- prefix = [sys.executable] +- else: +- prefix = [] +- +- def _subst(a): +- if a == '@tahoe': return script +- return a +- command = prefix + [runner] + map(_subst, args[1:]) +-else: +- runner = script +- command = prefix + [script] + args +- +- if not os.path.exists(script): +- print(whoami) +- print('''\ +-I could not find the support script +-"%s". +- +-To run an installed version of Tahoe-LAFS, please execute the "tahoe" +-script that is installed into the appropriate place for executables +-when you run "python setup.py install" (perhaps as "%s"). +-''' % (script, perhaps_installed_tahoe)) +- sys.exit(1) +- +-try: +- res = subprocess.call(command, env=os.environ) +-except Exception as le: +- print(whoami) +- print('''\ +-I just tried to invoke "%s" +-and got an exception. 
+-''' % (runner,)) +- raise +-else: +- sys.exit(res) ++from allmydata.scripts import runner ++runner.run() diff -Nru tahoe-lafs-1.9.2/debian/patches/customize_setupcfg_aliases.diff tahoe-lafs-1.10.0/debian/patches/customize_setupcfg_aliases.diff --- tahoe-lafs-1.9.2/debian/patches/customize_setupcfg_aliases.diff 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/customize_setupcfg_aliases.diff 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,20 @@ +Description: Customize upstream's defined setup.py aliases. + Some of the setup.py aliases defined in setup.cfg by upstream are not needed + in the Debian package context. +Forwarded: not-needed +Author: bertagaz +Index: tahoe.git/setup.cfg +=================================================================== +--- tahoe.git.orig/setup.cfg 2013-10-09 14:41:43.909073001 +0200 ++++ tahoe.git/setup.cfg 2013-10-10 14:05:43.673073382 +0200 +@@ -5,8 +5,8 @@ + https://tahoe-lafs.org/source/tahoe-lafs/deps/tahoe-lafs-dep-eggs/ + + [aliases] +-build = update_version develop --prefix=support make_executable build +-test = update_version develop --prefix=support make_executable build trial ++build = update_version make_executable build ++test = update_version develop make_executable build trial + sdist = update_version sdist + install = update_version install + bdist_egg = update_version bdist_egg diff -Nru tahoe-lafs-1.9.2/debian/patches/exclude_buildtest_package.diff tahoe-lafs-1.10.0/debian/patches/exclude_buildtest_package.diff --- tahoe-lafs-1.9.2/debian/patches/exclude_buildtest_package.diff 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/exclude_buildtest_package.diff 2014-01-18 01:59:58.000000000 +0000 @@ -1,26 +1,27 @@ Description: Exclude buildtest python package from Debian package. By default tahoe-lafs also installs a buildtest package, but this isn't relevant in the context of the Debian package.
+Forwarded: not-needed Author: bertagaz -Index: tahoe/setup.py +Index: tahoe.git/setup.py =================================================================== ---- tahoe.orig/setup.py 2012-07-04 21:42:07.000000000 +0200 -+++ tahoe/setup.py 2012-07-04 21:59:45.051169297 +0200 -@@ -467,8 +467,7 @@ - 'allmydata.util', +--- tahoe.git.orig/setup.py 2013-09-03 19:04:12.745099226 +0200 ++++ tahoe.git/setup.py 2013-09-03 19:04:24.637106872 +0200 +@@ -436,8 +436,7 @@ 'allmydata.web', 'allmydata.web.static', + 'allmydata.web.static.css', - 'allmydata.windows', - 'buildtest'], + 'allmydata.windows'], classifiers=trove_classifiers, test_suite="allmydata.test", install_requires=install_requires, -Index: tahoe/src/allmydata_tahoe.egg-info/SOURCES.txt +Index: tahoe.git/src/allmydata_tahoe.egg-info/SOURCES.txt =================================================================== ---- tahoe.orig/src/allmydata_tahoe.egg-info/SOURCES.txt 2012-07-04 21:41:43.000000000 +0200 -+++ tahoe/src/allmydata_tahoe.egg-info/SOURCES.txt 2012-07-04 21:59:25.615659017 +0200 -@@ -431,7 +431,5 @@ +--- tahoe.git.orig/src/allmydata_tahoe.egg-info/SOURCES.txt 2013-09-03 19:00:55.393107907 +0200 ++++ tahoe.git/src/allmydata_tahoe.egg-info/SOURCES.txt 2013-09-03 19:04:12.753100409 +0200 +@@ -422,7 +422,5 @@ src/allmydata_tahoe.egg-info/not-zip-safe src/allmydata_tahoe.egg-info/requires.txt src/allmydata_tahoe.egg-info/top_level.txt @@ -30,10 +31,10 @@ -twisted/plugins/allmydata_trial.py \ No newline at end of file +twisted/plugins/allmydata_trial.py -Index: tahoe/src/allmydata_tahoe.egg-info/top_level.txt +Index: tahoe.git/src/allmydata_tahoe.egg-info/top_level.txt =================================================================== ---- tahoe.orig/src/allmydata_tahoe.egg-info/top_level.txt 2012-07-04 21:41:43.295659068 +0200 -+++ tahoe/src/allmydata_tahoe.egg-info/top_level.txt 2012-07-04 21:59:25.615659017 +0200 +--- tahoe.git.orig/src/allmydata_tahoe.egg-info/top_level.txt 2013-09-03 19:00:55.393107907 +0200 ++++ tahoe.git/src/allmydata_tahoe.egg-info/top_level.txt 2013-09-03 19:04:12.753100409 +0200 @@ -1,2 +1 @@ allmydata -buildtest diff -Nru tahoe-lafs-1.9.2/debian/patches/remove_upstream_setup_deps.diff tahoe-lafs-1.10.0/debian/patches/remove_upstream_setup_deps.diff --- tahoe-lafs-1.9.2/debian/patches/remove_upstream_setup_deps.diff 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/remove_upstream_setup_deps.diff 2014-01-18 01:59:58.000000000 +0000 @@ -1,12 +1,14 @@ Description: Remove upstream setup dependencies. - Tahoe-lafs setup uses a custom setuptools which rely on darcsver to do some - smart guessings. This isn't relevant in Debian and needs to be ignored. + Tahoe-LAFS uses a custom setuptools included in its sources as a Python egg. + We need to patch setup.py so that it uses Debian's setuptools package at build + time.
+Forwarded: not-needed Author: bertagaz -Index: tahoe/setup.py +Index: tahoe.git/setup.py =================================================================== ---- tahoe.orig/setup.py 2012-07-04 21:41:43.295659068 +0200 -+++ tahoe/setup.py 2012-07-04 21:42:07.535159561 +0200 -@@ -68,13 +68,13 @@ +--- tahoe.git.orig/setup.py 2013-10-09 14:41:43.909073001 +0200 ++++ tahoe.git/setup.py 2013-10-10 14:04:02.709073003 +0200 +@@ -68,11 +68,11 @@ del sys.argv[1] install_requires += ["fakedependency >= 1.0.0"] @@ -15,40 +17,19 @@ -egg = os.path.realpath(glob.glob('setuptools-*.egg')[0]) -sys.path.insert(0, egg) --egg = os.path.realpath(glob.glob('darcsver-*.egg')[0]) --sys.path.insert(0, egg) -import setuptools; setuptools.bootstrap_install_from = egg +#egg = os.path.realpath(glob.glob('setuptools-*.egg')[0]) +#sys.path.insert(0, egg) -+#egg = os.path.realpath(glob.glob('darcsver-*.egg')[0]) -+#sys.path.insert(0, egg) +#import setuptools; setuptools.bootstrap_install_from = egg from setuptools import setup from setuptools.command import sdist -@@ -476,7 +476,7 @@ - package_data={"allmydata.web": ["*.xhtml"], +@@ -446,7 +446,7 @@ "allmydata.web.static": ["*.js", "*.png", "*.css"], + "allmydata.web.static.css": ["*.css"], }, - setup_requires=setup_requires, + #setup_requires=setup_requires, entry_points = { 'console_scripts': [ 'tahoe = allmydata.scripts.runner:run' ] }, zip_safe=False, # We prefer unzipped for easier access. - versionfiles=['src/allmydata/_version.py',], -Index: tahoe/setup.cfg -=================================================================== ---- tahoe.orig/setup.cfg 2012-07-04 21:41:43.295659068 +0200 -+++ tahoe/setup.cfg 2012-07-04 21:58:29.839700075 +0200 -@@ -9,12 +9,3 @@ - tag_date = 0 - tag_svn_revision = 0 - --[aliases] --sdist_dsc = update_version sdist_dsc --sdist = update_version sdist --trial = update_version trial --build = update_version develop --prefix=support make_executable build --install = update_version install --test = update_version develop --prefix=support make_executable build trial --bdist_egg = update_version bdist_egg -- + **setup_args diff -Nru tahoe-lafs-1.9.2/debian/patches/series tahoe-lafs-1.10.0/debian/patches/series --- tahoe-lafs-1.9.2/debian/patches/series 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/patches/series 2014-01-18 01:59:58.000000000 +0000 @@ -1,2 +1,5 @@ remove_upstream_setup_deps.diff +customize_setupcfg_aliases.diff +create_proper_tahoe_script_from_template.diff exclude_buildtest_package.diff +add_kFreeBSD_in_supported_os.diff diff -Nru tahoe-lafs-1.9.2/debian/rules tahoe-lafs-1.10.0/debian/rules --- tahoe-lafs-1.9.2/debian/rules 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/rules 2014-01-18 01:59:58.000000000 +0000 @@ -20,14 +20,6 @@ rm -rf src/allmydata_tahoe.egg-info mv src/allmydata_tahoe.egg-info.orig src/allmydata_tahoe.egg-info - # Clean bin/tahoe trick stolen from upstream packaging script. 
- head -1 debian/$(package)/usr/bin/tahoe > debian/$(package)/usr/bin/tahoe.new - echo "from allmydata.scripts import runner" >> debian/$(package)/usr/bin/tahoe.new - echo "runner.run()" >> debian/$(package)//usr/bin/tahoe.new - chmod +x debian/$(package)/usr/bin/tahoe.new - mv debian/$(package)/usr/bin/tahoe.new debian/$(package)/usr/bin/tahoe - override_dh_installdocs: dh_installdocs - mv debian/$(package)/usr/share/doc/$(package)/NEWS.rst \ - debian/$(package)/usr/share/doc/$(package)/changelog + dh_installchangelogs -k NEWS.rst diff -Nru tahoe-lafs-1.9.2/debian/tahoe-lafs.default tahoe-lafs-1.10.0/debian/tahoe-lafs.default --- tahoe-lafs-1.9.2/debian/tahoe-lafs.default 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/tahoe-lafs.default 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,10 @@ +# Start only these tahoe-lafs nodes automatically via init script. Allowed +# values are "all", "none" or a space-separated list of tahoe-lafs nodes. If +# empty, "none" is assumed. +# +#AUTOSTART="all" +#AUTOSTART="none" +#AUTOSTART="home office" + +# Pass arguments to tahoe start. Defaults to "--syslog". +#DAEMONARGS= diff -Nru tahoe-lafs-1.9.2/debian/tahoe-lafs.dirs tahoe-lafs-1.10.0/debian/tahoe-lafs.dirs --- tahoe-lafs-1.9.2/debian/tahoe-lafs.dirs 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/tahoe-lafs.dirs 2014-01-18 01:59:58.000000000 +0000 @@ -1 +1,3 @@ usr/share/munin/plugins/ +var/lib/tahoe-lafs/ +usr/bin/ diff -Nru tahoe-lafs-1.9.2/debian/tahoe-lafs.init tahoe-lafs-1.10.0/debian/tahoe-lafs.init --- tahoe-lafs-1.9.2/debian/tahoe-lafs.init 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/tahoe-lafs.init 2014-01-18 01:59:58.000000000 +0000 @@ -0,0 +1,111 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: tahoe-lafs +# Required-Start: $network $remote_fs $syslog +# Required-Stop: $network $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Tahoe-LAFS nodes +### END INIT INFO + +# Initscript by bertagaz +# Heavily reviewed by Zooko O'Whielacronx +# and intrigeri . + +set -e +set -u + +. /lib/lsb/init-functions + +DAEMON="/usr/bin/tahoe" +DESC="Tahoe-LAFS secure filesystems" +CONFIG_DIR="/var/lib/tahoe-lafs" +test -x $DAEMON || exit 1 +test -d $CONFIG_DIR || exit 1 + +# Source defaults file; edit that file to configure this script. +DAEMONARGS="--syslog" +AUTOSTART="" + +if [ -e /etc/default/tahoe-lafs ]; then + . /etc/default/tahoe-lafs +fi + +nodes_in () { + local dir="$1" + cd "$dir" && find ./ -mindepth 1 -maxdepth 1 -type d | cut -c3- +} + +node_uid () { + local node_dir="$1" + stat -c %U "$CONFIG_DIR/${node_dir}" +} + +_tahoe () { + local action="$1" + local node_name="$2" + local node_uid=$(node_uid "$node_name") + + if [ "$node_uid" = "root" ]; then + log_failure_msg "${node_name} node directory shouldn't be owned by root!" + return 1 + fi + + case "$action" in + start|restart) + su -s "/bin/sh" \ + -c "'$DAEMON' '$action' $DAEMONARGS '$CONFIG_DIR/${node_name}'" \ + "$node_uid" + ;; + stop) + su -s "/bin/sh" \ + -c "'$DAEMON' '$action' '$CONFIG_DIR/${node_name}'" \ + "$node_uid" + ;; + esac +} + +case "$1" in +start|stop|restart) + command="$1" + shift + + log_daemon_msg "$DESC" + log_progress_msg "\n" + + if [ $# -eq 0 ]; then + if [ "$AUTOSTART" = "none" ] || [ -z "$AUTOSTART" ]; then + log_warning_msg " Autostart disabled." + fi + if [ "$AUTOSTART" = "all" ]; then + # all nodes shall be taken care of automatically + for name in $(nodes_in $CONFIG_DIR); do + _tahoe "$command" "$name" || STATUS="$?"
+ done + else + # invoke command only for nodes specified in $AUTOSTART + for name in $AUTOSTART ; do + _tahoe "$command" "$name" || STATUS="$?" + done + fi + else + for name in "$@" ; do + _tahoe "$command" "$name" || STATUS="$?" + done + fi + log_end_msg "${STATUS:-0}" + ;; +force-reload) + shift + $0 restart "$@" + ;; +*) + echo "Usage: $0 {start|stop|restart|force-reload} [node(s)]" >&2 + exit 1 + ;; +esac + +exit 0 + +# vim:set ai sts=2 sw=2 tw=0: diff -Nru tahoe-lafs-1.9.2/debian/tahoe-lafs.install tahoe-lafs-1.10.0/debian/tahoe-lafs.install --- tahoe-lafs-1.9.2/debian/tahoe-lafs.install 2012-07-05 19:14:00.000000000 +0000 +++ tahoe-lafs-1.10.0/debian/tahoe-lafs.install 2014-01-18 01:59:58.000000000 +0000 @@ -1 +1,2 @@ misc/operations_helpers/munin/tahoe_* usr/share/munin/plugins/ +bin/tahoe usr/bin/ diff -Nru tahoe-lafs-1.9.2/docs/about.rst tahoe-lafs-1.10.0/docs/about.rst --- tahoe-lafs-1.9.2/docs/about.rst 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/about.rst 2013-09-03 15:38:27.000000000 +0000 @@ -27,7 +27,7 @@ mitigate these risks. What we mean by "security" is something different. *The service provider -never has the ability to read or modify your data in the first place—never.* +never has the ability to read or modify your data in the first place: never.* If you use Tahoe-LAFS, then all of the threats described above are non-issues to you. Not only is it easy and inexpensive for the service provider to maintain the security of your data, but in fact they couldn't violate its @@ -38,11 +38,11 @@ doesn't require you to perform a manual pre-encryption step or cumbersome key management. (After all, having to do cumbersome manual operations when storing or accessing your data would nullify one of the primary benefits of -using cloud storage in the first place—convenience.) +using cloud storage in the first place: convenience.) Here's how it works: -.. image:: https://tahoe-lafs.org/~zooko/network-and-reliance-topology.png +.. image:: network-and-reliance-topology.svg A "storage grid" is made up of a number of storage servers. A storage server has direct attached storage (typically one or more hard disks). A "gateway" diff -Nru tahoe-lafs-1.9.2/docs/architecture.rst tahoe-lafs-1.10.0/docs/architecture.rst --- tahoe-lafs-1.9.2/docs/architecture.rst 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/architecture.rst 2013-09-03 15:38:27.000000000 +0000 @@ -553,9 +553,3 @@ still retaining high reliability, but large unstable grids (where nodes are coming and going very quickly) may require more repair/verification bandwidth than actual upload/download traffic. - -Tahoe-LAFS nodes that run a webserver have a page dedicated to provisioning -decisions: this tool may help you evaluate different expansion factors and -view the disk consumption of each. It is also acquiring some sections with -availability/reliability numbers, as well as preliminary cost analysis data. -This tool will continue to evolve as our analysis improves. diff -Nru tahoe-lafs-1.9.2/docs/backdoors.rst tahoe-lafs-1.10.0/docs/backdoors.rst --- tahoe-lafs-1.9.2/docs/backdoors.rst 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/backdoors.rst 2013-09-03 15:38:27.000000000 +0000 @@ -22,13 +22,13 @@ The core Tahoe developers promise never to change Tahoe-LAFS to facilitate government access to data stored or transmitted by it. 
Even if it were -desirable to facilitate such access—which it is not—we believe it would not -be technically feasible to do so without severely compromising Tahoe-LAFS' -security against other attackers. There have been many examples in which -backdoors intended for use by government have introduced vulnerabilities -exploitable by other parties (a notable example being the Greek cellphone -eavesdropping scandal in 2004/5). RFCs `1984`_ and `2804`_ elaborate on the -security case against such backdoors. +desirable to facilitate such access -- which it is not -- we believe it would +not be technically feasible to do so without severely compromising +Tahoe-LAFS' security against other attackers. There have been many examples +in which backdoors intended for use by government have introduced +vulnerabilities exploitable by other parties (a notable example being the +Greek cellphone eavesdropping scandal in 2004/5). RFCs `1984`_ and `2804`_ +elaborate on the security case against such backdoors. .. _1984: https://tools.ietf.org/html/rfc1984 .. _2804: https://tools.ietf.org/html/rfc2804 @@ -39,7 +39,7 @@ The following Tahoe-LAFS developers agree with this statement: -David-Sarah Hopwood +David-Sarah Hopwood [Daira Hopwood] Zooko Wilcox-O'Hearn diff -Nru tahoe-lafs-1.9.2/docs/configuration.rst tahoe-lafs-1.10.0/docs/configuration.rst --- tahoe-lafs-1.9.2/docs/configuration.rst 2012-06-23 22:41:21.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/configuration.rst 2013-09-03 15:38:27.000000000 +0000 @@ -294,9 +294,9 @@ This FURL tells the client how to connect to the introducer. Each Tahoe-LAFS grid is defined by an introducer. The introducer's FURL is - created by the introducer node and written into its base directory when - it starts, whereupon it should be published to everyone who wishes to - attach a client to that grid + created by the introducer node and written into its private base + directory when it starts, whereupon it should be published to everyone + who wishes to attach a client to that grid ``helper.furl = (FURL string, optional)`` @@ -346,8 +346,13 @@ guarantee the availability of the uploaded file. This value should not be larger than the number of servers on your grid. - A value of ``shares.happy`` <= ``k`` is allowed, but does not provide any - redundancy if some servers fail or lose shares. + A value of ``shares.happy`` <= ``k`` is allowed, but this is not + guaranteed to provide any redundancy if some servers fail or lose shares. + It may still provide redundancy in practice if ``N`` is greater than + the number of connected servers, because in that case there will typically + be more than one share on at least some storage nodes. However, since a + successful upload only guarantees that at least ``shares.happy`` shares + have been stored, the worst case is still that there is no redundancy. (Mutable files use a different share placement algorithm that does not currently consider this parameter.) @@ -370,8 +375,8 @@ controlled by this parameter and will always use SDMF. We may revisit this decision in future versions of Tahoe-LAFS. - See ``_ for details about mutable - file formats. + See ``_ for details about mutable file + formats. Frontend Configuration ====================== @@ -448,10 +453,11 @@ runs.) This string contains a number, with an optional case-insensitive scale - suffix like "K" or "M" or "G", and an optional "B" or "iB" suffix. So - "100MB", "100M", "100000000B", "100000000", and "100000kb" all mean the - same thing. 
Likewise, "1MiB", "1024KiB", and "1048576B" all mean the same - thing. + suffix, optionally followed by "B" or "iB". The supported scale suffixes + are "K", "M", "G", "T", "P" and "E", and a following "i" indicates the use + of powers of 1024 rather than 1000. So "100MB", "100 M", "100000000B", + "100000000", and "100000kb" all mean the same thing. Likewise, "1MiB", + "1024KiB", "1024 Ki", and "1048576 B" all mean the same thing. "``tahoe create-node``" generates a tahoe.cfg with "``reserved_space=1G``", but you may wish to raise, lower, or remove the @@ -501,7 +507,7 @@ The Introducer node maintains some different state than regular client nodes. -``BASEDIR/introducer.furl`` +``BASEDIR/private/introducer.furl`` This is generated the first time the introducer node is started, and used again on subsequent runs, to give the introduction service a persistent diff -Nru tahoe-lafs-1.9.2/docs/convergence-secret.rst tahoe-lafs-1.10.0/docs/convergence-secret.rst --- tahoe-lafs-1.9.2/docs/convergence-secret.rst 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/convergence-secret.rst 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,78 @@ + +What Is It? +----------- + +The identifier of a file (also called the "capability" to a file) is derived +from two pieces of information when the file is uploaded: the content of the +file and the upload client's "convergence secret". By default, the +convergence secret is randomly generated by the client when it first starts +up, then stored in the client's base directory (``private/convergence``) and re-used after that. So the same file content +uploaded from the same client will always have the same cap. Uploading the +file from a different client with a different convergence secret would result +in a different cap -- and in a second copy of the file's contents stored on +the grid. If you want files you upload to converge (also known as +"deduplicate") with files uploaded by someone else, just make sure you're +using the same convergence secret that they used when they uploaded. + +The advantages of deduplication should be clear, but keep in mind that the +convergence secret was created to protect confidentiality. There are two +attacks that can be used against you by someone who knows the convergence +secret you use. + +The first one is called the "Confirmation-of-a-File Attack". Someone who +knows the convergence secret that you used when you uploaded a file, and who +has a copy of that file themselves, can check whether you have a copy of that +file. This is usually not a problem, but it could be if that file is, for +example, a book or movie that is banned in your country. + +The second attack is more subtle. It is called the +"Learn-the-Remaining-Information Attack". Suppose you've received a +confidential document, such as a PDF from your bank which contains many pages +of boilerplate text as well as containing your bank account number and +balance. Someone who knows your convergence secret can generate a file with +all of the boilerplate text (perhaps they would open an account with the same +bank so they receive the same document with their account number and +balance). Then they can try a "brute force search" to find your account +number and your balance. + +The defense against these attacks is that only someone who knows the +convergence secret that you used on each file can perform these attacks on +that file. + +Both of these attacks and the defense are described in more detail in `Drew +Perttula's Hack Tahoe-LAFS Hall Of Fame entry`_ + +..
_`Drew Perttula's Hack Tahoe-LAFS Hall Of Fame entry`: + https://tahoe-lafs.org/hacktahoelafs/drew_perttula.html + +What If I Change My Convergence Secret? +--------------------------------------- + +All your old file capabilities will still work, but the new data that you +upload will not be deduplicated with the old data. If you upload all of the +same things to the grid, you will end up using twice the space until garbage +collection kicks in (if it's enabled). Changing the convergence secret that a +storage client uses for uploads can be thought of as moving the client to a +new "deduplication domain". + +How To Use It +------------- + +To enable deduplication between different clients, **securely** copy the +convergence secret file from one client to all the others. + +For example, if you are on host A and have an account on host B and you have +scp installed, run: + + *scp ~/.tahoe/private/convergence + my_other_account@B:.tahoe/private/convergence* + +If you have two different clients on a single computer, say one for each +disk, you would do: + + *cp /tahoe1/private/convergence /tahoe2/private/convergence* + +After you change the convergence secret file, you must restart the client +before it will stop using the old one and read the new one from the file. diff -Nru tahoe-lafs-1.9.2/docs/frontends/CLI.rst tahoe-lafs-1.10.0/docs/frontends/CLI.rst --- tahoe-lafs-1.9.2/docs/frontends/CLI.rst 2012-05-14 02:07:12.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/frontends/CLI.rst 2013-09-03 15:38:27.000000000 +0000 @@ -56,11 +56,18 @@ like Twisted, Foolscap, pycryptopp, and zfec. "``tahoe --version-and-path``" will also show the path from which each library was imported. -On Unix systems, the shell expands filename wildcards (``*`` and ``?``) -before the program is able to read them, which may produce unexpected -results for many ``tahoe`` comands. We recommend that you avoid using them. -On Windows, wildcards cannot be used to specify multiple filenames to -``tahoe``. +On Unix systems, the shell expands filename wildcards (``'*'`` and ``'?'``) +before the program is able to read them, which may produce unexpected results +for many ``tahoe`` commands. If you use wildcards, we recommend starting the +path with "``./``", for example "``tahoe cp -r ./* somewhere:``". This +prevents the expanded filename from being interpreted as an option or as an +alias, allowing filenames that start with a dash or contain colons to be +handled correctly. + +On Windows, a single letter followed by a colon is treated as a drive +specification rather than an alias (and is invalid unless a local path is +allowed in that context). Wildcards cannot be used to specify multiple +filenames to ``tahoe`` on Windows. Unicode Support --------------- @@ -104,8 +111,8 @@ "``tahoe create-introducer [NODEDIR]``" is used to create the Introducer node. This node provides introduction services and nothing else. When started, this -node will produce an ``introducer.furl`` file, which should be published to all -clients. +node will produce a ``private/introducer.furl`` file, which should be +published to all clients. "``tahoe create-key-generator [NODEDIR]``" is used to create a special "key-generation" service, which allows a client to offload their RSA key @@ -136,7 +143,7 @@ These commands let you examine a Tahoe-LAFS filesystem, providing basic list/upload/download/unlink/rename/mkdir functionality. They can be used as primitives by other scripts. Most of these commands are fairly thin wrappers
Most of these commands are fairly thin wrappers -around web-API calls, which are described in ``_. +around web-API calls, which are described in ``__. By default, all filesystem-manipulation commands look in ``~/.tahoe/`` to figure out which Tahoe-LAFS node they should use. When the CLI command makes @@ -152,7 +159,7 @@ Starting Directories -------------------- -As described in `docs/architecture.rst <../architecture.rst>`_, the +As described in `docs/architecture.rst <../architecture.rst>`__, the Tahoe-LAFS distributed filesystem consists of a collection of directories and files, each of which has a "read-cap" or a "write-cap" (also known as a URI). Each directory is simply a table that maps a name to a child file @@ -286,6 +293,11 @@ * ``DIRCAP/[SUBDIRS/]FILENAME`` or ``DIRCAP:./[SUBDIRS/]FILENAME`` for a path relative to a directory cap. +See `CLI Command Overview`_ above for information on using wildcards with +local paths, and different treatment of colons between Unix and Windows. + +``FROMLOCAL`` or ``TOLOCAL`` is a path in the local filesystem. + Command Examples ---------------- @@ -304,6 +316,9 @@ Since Tahoe-LAFS v1.8.2, the alias name can be given with or without the trailing colon. + On Windows, the alias should not be a single character, because it would be + confused with the drive letter of a local path. + ``tahoe create-alias fun`` This combines "``tahoe mkdir``" and "``tahoe add-alias``" into a single step. @@ -480,7 +495,7 @@ Same as above, but this time the backup process will ignore any filename that will end with '~'. ``--exclude`` will accept any standard Unix shell-style wildcards, as implemented by the - `Python fnmatch module `_. + `Python fnmatch module `__. You may give multiple ``--exclude`` options. Please pay attention that the pattern will be matched against any level of the directory tree; it's still impossible to specify absolute path exclusions. diff -Nru tahoe-lafs-1.9.2/docs/frontends/FTP-and-SFTP.rst tahoe-lafs-1.10.0/docs/frontends/FTP-and-SFTP.rst --- tahoe-lafs-1.9.2/docs/frontends/FTP-and-SFTP.rst 2012-06-18 00:28:10.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/frontends/FTP-and-SFTP.rst 2013-09-03 15:38:27.000000000 +0000 @@ -250,16 +250,10 @@ Non-ASCII filenames are supported with SFTP only if the client encodes filenames as UTF-8 (`ticket #1089`_). -The gateway node may hang or consume 100% CPU if the client tries to rekey. -(`ticket #1297`_). This is due to a bug in Twisted (`Twisted ticket #4395`_) -which was fixed in Twisted 11.0 (released 3-April-2011). - See also wiki:SftpFrontend_. .. _ticket #1059: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1059 .. _ticket #1089: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1089 -.. _ticket #1297: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1297 -.. _Twisted ticket #4395: https://twistedmatrix.com/trac/ticket/4395 Known Issues in the FTP Frontend -------------------------------- diff -Nru tahoe-lafs-1.9.2/docs/frontends/webapi.rst tahoe-lafs-1.10.0/docs/frontends/webapi.rst --- tahoe-lafs-1.9.2/docs/frontends/webapi.rst 2012-07-01 23:07:28.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/frontends/webapi.rst 2013-09-03 15:38:27.000000000 +0000 @@ -14,23 +14,24 @@ 1. `Reading a file`_ 2. `Writing/Uploading a File`_ 3. `Creating a New Directory`_ - 4. `Getting Information About A File Or Directory (as JSON)`_ + 4. `Getting Information About a File Or Directory (as JSON)`_ 5. `Attaching an Existing File or Directory by its read- or write-cap`_ 6. 
`Adding Multiple Files or Directories to a Parent Directory at Once`_ 7. `Unlinking a File or Directory`_ 6. `Browser Operations: Human-Oriented Interfaces`_ - 1. `Viewing A Directory (as HTML)`_ + 1. `Viewing a Directory (as HTML)`_ 2. `Viewing/Downloading a File`_ - 3. `Getting Information About A File Or Directory (as HTML)`_ + 3. `Getting Information About a File Or Directory (as HTML)`_ 4. `Creating a Directory`_ 5. `Uploading a File`_ - 6. `Attaching An Existing File Or Directory (by URI)`_ - 7. `Unlinking A Child`_ - 8. `Renaming A Child`_ - 9. `Other Utilities`_ - 10. `Debugging and Testing Features`_ + 6. `Attaching an Existing File Or Directory (by URI)`_ + 7. `Unlinking a Child`_ + 8. `Renaming a Child`_ + 9. `Relinking ("Moving") a Child`_ + 10. `Other Utilities`_ + 11. `Debugging and Testing Features`_ 7. `Other Useful Pages`_ 8. `Static Files in /public_html`_ @@ -156,9 +157,7 @@ http://127.0.0.1:3456/uri/ + $CAP -So, to access the directory named above (which happens to be the -publically-writeable sample directory on the Tahoe test grid, described at -http://allmydata.org/trac/tahoe-lafs/wiki/TestGrid), the URL would be:: +So, to access the directory named above, the URL would be:: http://127.0.0.1:3456/uri/URI%3ADIR2%3Adjrdkfawoqihigoett4g6auz6a%3Ajx5mplfpwexnoqff7y5e4zjus4lidm76dcuarpct7cckorh2dpgq/ @@ -336,7 +335,7 @@ operations that are intended for web browsers. -Reading A File +Reading a File -------------- ``GET /uri/$FILECAP`` @@ -352,7 +351,7 @@ purpose. -Writing/Uploading A File +Writing/Uploading a File ------------------------ ``PUT /uri/$FILECAP`` @@ -415,7 +414,7 @@ interprets those arguments in the same way as the linked forms of PUT described immediately above. -Creating A New Directory +Creating a New Directory ------------------------ ``POST /uri?t=mkdir`` @@ -458,7 +457,6 @@ { "Fran\u00e7ais": [ "filenode", { "ro_uri": "URI:CHK:...", - "size": bytes, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, @@ -661,7 +659,7 @@ or already has a child named NAME. -Getting Information About A File Or Directory (as JSON) +Getting Information About a File Or Directory (as JSON) ------------------------------------------------------- ``GET /uri/$FILECAP?t=json`` @@ -954,7 +952,7 @@ existing "tahoe" metadata is preserved. The metadata["tahoe"] value is reserved for metadata generated by the tahoe node itself. The only two keys currently placed here are "linkcrtime" and "linkmotime". For details, see - the section above entitled "Get Information About A File Or Directory (as + the section above entitled "Getting Information About a File Or Directory (as JSON)", in the "About the metadata" subsection. Note that this command was introduced with the name "set_children", which @@ -1010,7 +1008,7 @@ descriptions below display the most significant arguments as URL query args. -Viewing A Directory (as HTML) +Viewing a Directory (as HTML) ----------------------------- ``GET /uri/$DIRCAP/[SUBDIRS../]`` @@ -1066,7 +1064,7 @@ URLs may also use /file/$FILECAP/FILENAME as a synonym for /named/$FILECAP/FILENAME. -Getting Information About A File Or Directory (as HTML) +Getting Information About a File Or Directory (as HTML) ------------------------------------------------------- ``GET /uri/$FILECAP?t=info`` @@ -1122,7 +1120,7 @@ This accepts a format= argument in the query string. 
Refer to the documentation of POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir&name=CHILDNAME in - `Creating A New Directory`_ for information on the behavior of the format= + `Creating a New Directory`_ for information on the behavior of the format= argument. If a "when_done=URL" argument is provided, the HTTP response will cause the @@ -1162,7 +1160,7 @@ operation took, etc. This accepts format= and mutable=true query string arguments. Refer to - `Writing/Uploading A File`_ for information on the behavior of format= and + `Writing/Uploading a File`_ for information on the behavior of format= and mutable=true. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=upload`` @@ -1200,7 +1198,7 @@ already exist. This accepts format= and mutable=true query string arguments. Refer to - `Writing/Uploading A File`_ for information on the behavior of format= and + `Writing/Uploading a File`_ for information on the behavior of format= and mutable=true. If a "when_done=URL" argument is provided, the HTTP response will cause the @@ -1245,12 +1243,12 @@ This accepts the same replace= argument as POST t=upload. -Unlinking A Child +Unlinking a Child ----------------- ``POST /uri/$DIRCAP/[SUBDIRS../]?t=delete&name=CHILDNAME`` -``POST /uri/$DIRCAP/[SUBDIRS../]?t=unlink&name=CHILDNAME`` +``POST /uri/$DIRCAP/[SUBDIRS../]?t=unlink&name=CHILDNAME`` (Tahoe >= v1.9) This instructs the node to remove a child object (file or subdirectory) from the given directory, which must be mutable. Note that the entire subtree is @@ -1264,7 +1262,7 @@ be used. -Renaming A Child +Renaming a Child ---------------- ``POST /uri/$DIRCAP/[SUBDIRS../]?t=rename&from_name=OLD&to_name=NEW`` @@ -1274,8 +1272,73 @@ same child-cap under the new name, except that it preserves metadata. This operation cannot move the child to a different directory. - This operation will replace any existing child of the new name, making it - behave like the UNIX "``mv -f``" command. + The default behavior is to overwrite any existing link at the destination + (replace=true). To prevent this (and make the operation return an error + instead of overwriting), add a "replace=false" argument. With replace=false, + this operation will return an HTTP 409 "Conflict" error if the destination + is not the same link as the source and there is already a link at the + destination, rather than overwriting the existing link. To allow the + operation to overwrite a link to a file, but return an HTTP 409 error when + trying to overwrite a link to a directory, use "replace=only-files" (this + behavior is closer to the traditional UNIX "mv" command). Note that "true", + "t", and "1" are all synonyms for "True"; "false", "f", and "0" are synonyms + for "False"; and the parameter is case-insensitive. + + +Relinking ("Moving") a Child +---------------------------- + +``POST /uri/$DIRCAP/[SUBDIRS../]?t=relink&from_name=OLD&to_dir=$NEWDIRCAP/[NEWSUBDIRS../]&to_name=NEW`` + ``[&replace=true|false|only-files]`` (Tahoe >= v1.10) + + This instructs the node to move a child of the given source directory, into + a different directory and/or to a different name. The command is named + ``relink`` because what it does is add a new link to the child from the new + location, then remove the old link. Nothing is actually "moved": the child + is still reachable through any path from which it was formerly reachable, + and the storage space occupied by its ciphertext is not affected. + + The source and destination directories must be writeable. 
If {{{to_dir}}} is + not present, the child link is renamed within the same directory. If + {{{to_name}}} is not present then it defaults to {{{from_name}}}. If the + destination link (directory and name) is the same as the source link, the + operation has no effect. + + Metadata from the source directory entry is preserved. Multiple levels of + descent in the source and destination paths are supported. + + This operation will return an HTTP 404 "Not Found" error if + ``$DIRCAP/[SUBDIRS../]``, the child being moved, or the destination + directory does not exist. It will return an HTTP 400 "Bad Request" error + if any entry in the source or destination paths is not a directory. + + The default behavior is to overwrite any existing link at the destination + (replace=true). To prevent this (and make the operation return an error + instead of overwriting), add a "replace=false" argument. With replace=false, + this operation will return an HTTP 409 "Conflict" error if the destination + is not the same link as the source and there is already a link at the + destination, rather than overwriting the existing link. To allow the + operation to overwrite a link to a file, but return an HTTP 409 error when + trying to overwrite a link to a directory, use "replace=only-files" (this + behavior is closer to the traditional UNIX "mv" command). Note that "true", + "t", and "1" are all synonyms for "True"; "false", "f", and "0" are synonyms + for "False"; and the parameter is case-insensitive. + + When relinking into a different directory, for safety, the child link is + not removed from the old directory until it has been successfully added to + the new directory. This implies that in case of a crash or failure, the + link to the child will not be lost, but it could be linked at both the old + and new locations. + + The source link should not be the same as any link (directory and child name) + in the ``to_dir`` path. This restriction is not enforced, but it may be + enforced in a future version. If it were violated then the result would be + to create a cycle in the directory structure that is not necessarily reachable + from the root of the destination path (``$NEWDIRCAP``), which could result in + data loss, as described in ticket `#943`_. + +.. _`#943`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/943 + Other Utilities --------------- @@ -1298,6 +1361,8 @@ functionality described above, with the provided $CHILDNAME present in the 'from_name' field of that form. I.e. this presents a form offering to rename $CHILDNAME, requesting the new name, and submitting POST rename. + This same URL format can also be used with "move-form" with the expected + results. ``GET /uri/$DIRCAP/[SUBDIRS../]CHILDNAME?t=uri`` @@ -1346,14 +1411,16 @@ count-shares-good: the number of good shares that were found count-shares-needed: 'k', the number of shares required for recovery count-shares-expected: 'N', the number of total shares generated - count-good-share-hosts: the number of distinct storage servers with good - shares. Note that a high value does not necessarily - imply good share distribution, because some of - these servers may only hold duplicate shares. + count-good-share-hosts: the number of distinct storage servers with + good shares. Note that a high value does not + necessarily imply good share distribution, + because some of these servers may only hold + duplicate shares. 
count-wrong-shares: for mutable files, the number of shares for versions other than the 'best' one (highest sequence number, highest roothash). These are - either old ... + either old, or created by an uncoordinated or + not fully successful write. count-recoverable-versions: for mutable files, the number of recoverable versions of the file. For a healthy file, this will equal 1. @@ -1367,9 +1434,9 @@ needs-rebalancing: (bool) This field is intended to be True iff reliability could be improved for this file by rebalancing, i.e. by moving some shares to other - servers. It is not guaranteed to be computed correctly - in Tahoe-LAFS up to and including v1.9.2, and its - precise definition may change in future versions. + servers. It may be incorrect in some cases for + Tahoe-LAFS up to and including v1.10, and its + precise definition is expected to change. servers-responding: list of base32-encoded storage server identifiers, one for each server which responded to the share query. @@ -1811,17 +1878,6 @@ implementation hashes synchronously, so clients will probably never see progress-hash!=1.0). -``GET /provisioning/`` - - This page provides a basic tool to predict the likely storage and bandwidth - requirements of a large Tahoe grid. It provides forms to input things like - total number of users, number of files per user, average file size, number - of servers, expansion ratio, hard drive failure rate, etc. It then provides - numbers like how many disks per server will be needed, how many read - operations per second should be expected, and the likely MTBF for files in - the grid. This information is very preliminary, and the model upon which it - is based still needs a lot of work. - ``GET /helper_status/`` If the node is running a helper (i.e. if [helper]enabled is set to True in diff -Nru tahoe-lafs-1.9.2/docs/helper.rst tahoe-lafs-1.10.0/docs/helper.rst --- tahoe-lafs-1.9.2/docs/helper.rst 2012-05-14 02:07:12.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/helper.rst 2013-09-03 15:38:27.000000000 +0000 @@ -10,12 +10,14 @@ Overview ======== -As described in the `"Swarming Download, Trickling Upload" section of -architecture.rst `_, -Tahoe uploads require more bandwidth than downloads: you must push the -redundant shares during upload, but you do not need to retrieve them during -download. With the default 3-of-10 encoding parameters, this means that an -upload will require about 3.3x the traffic as a download of the same file. +As described in the "Swarming Download, Trickling Upload" section of +`architecture.rst`_, Tahoe uploads require more bandwidth than downloads: you +must push the redundant shares during upload, but you do not need to retrieve +them during download. With the default 3-of-10 encoding parameters, this +means that an upload will require about 3.3x the traffic as a download of the +same file. + +.. _architecture.rst: file:architecture.rst Unfortunately, this "expansion penalty" occurs in the same upstream direction that most consumer DSL lines are slow anyways. Typical ADSL lines get 8 times @@ -90,7 +92,7 @@ You can tell if your node is running a helper by looking at its web status page. Assuming that you've set up the 'webport' to use port 3456, point your -browser at http://localhost:3456/ . The welcome page will say "Helper: 0 +browser at ``http://localhost:3456/`` . The welcome page will say "Helper: 0 active uploads" or "Not running helper" as appropriate. The http://localhost:3456/helper_status page will also provide details on what the helper is currently doing. 
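To make the helper configuration discussed in these hunks concrete, the relevant tahoe.cfg stanzas look roughly like the following sketch; the FURL value is a placeholder, not a real one:

    # On the node that offers the helper service:
    [helper]
    enabled = true

    # On a client that should upload via someone else's helper;
    # paste the FURL given to you by the helper's operator (placeholder shown):
    [client]
    helper.furl = pb://exampletubid@helper.example.net:9999/placeholder

After editing tahoe.cfg, the node must be restarted to pick up the change, as the following hunk describes.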
@@ -125,23 +127,21 @@
 * clients who have been given the helper.furl by someone who is running a
   Helper and is willing to let them use it

-To take advantage of somebody else's Helper, take the helper.furl file that
-they give you, and copy it into your node's base directory, then restart the
-node:
-
-::
-
- cat email >$BASEDIR/helper.furl
- tahoe restart $BASEDIR
-
-This will signal the client to try and connect to the helper. Subsequent
-uploads will use the helper rather than using direct connections to the
-storage server.
+To take advantage of somebody else's Helper, take the helper furl that they
+give you, and edit your tahoe.cfg file. Enter the helper's furl into the
+value of the key "helper.furl" in the "[client]" section of tahoe.cfg, as
+described in the "Client Configuration" section of configuration.rst_.
+
+.. _configuration.rst: file:configuration.rst
+
+Then restart the node. This will signal the client to try to connect to the
+helper. Subsequent uploads will use the helper rather than using direct
+connections to the storage server.

 If the node has been configured to use a helper, that node's HTTP welcome
-page (http://localhost:3456/) will say "Helper: $HELPERFURL" instead of
-"Helper: None". If the helper is actually running and reachable, the next
-line will say "Connected to helper?: yes" instead of "no".
+page (``http://localhost:3456/``) will say "Helper: $HELPERFURL" instead of
+"Helper: None". If the helper is actually running and reachable, the bullet
+to the left of "Helper" will be green.

 The helper is optional. If a helper is connected when an upload begins, the
 upload will use the helper. If there is no helper connection present when an
@@ -150,7 +150,7 @@
 connection is lost, using the same exponential-backoff algorithm as all other
 tahoe/foolscap connections.

-The upload/download status page (http://localhost:3456/status) will announce
+The upload/download status page (``http://localhost:3456/status``) will announce
 the using-helper-or-not state of each upload, in the "Helper?" column.

 Other Helper Modes
diff -Nru tahoe-lafs-1.9.2/docs/known_issues.rst tahoe-lafs-1.10.0/docs/known_issues.rst
--- tahoe-lafs-1.9.2/docs/known_issues.rst	2012-07-03 16:28:38.000000000 +0000
+++ tahoe-lafs-1.10.0/docs/known_issues.rst	2013-09-03 15:38:27.000000000 +0000
@@ -18,7 +18,7 @@
 .. _the "historical known issues" document: historical/historical_known_issues.txt

-Known Issues in Tahoe-LAFS v1.9.2, released 3-Jul-2012
+Known Issues in Tahoe-LAFS v1.10, released 01-May-2013
 ======================================================

 * `Unauthorized access by JavaScript in unrelated files`_
@@ -27,6 +27,7 @@
 * `Capabilities may be leaked to web browser phishing filter / "safe browsing" servers`_
 * `Known issues in the FTP and SFTP frontends`_
 * `Traffic analysis based on sizes of files/directories, storage indices, and timing`_
+ * `Privacy leak via Google Chart API link in map-update timing web page`_

 ----

@@ -254,6 +255,48 @@

 ----

+Privacy leak via Google Chart API link in map-update timing web page
+--------------------------------------------------------------------
+
+The Tahoe web-based user interface includes a diagnostic page known as the
+"map-update timing page". It is reached through the "Recent and Active
+Operations" link on the front welcome page, then through the "Status" column
+for "map-update" operations (which occur when mutable files, including
+directories, are read or written). This page contains per-server response
+times, as lines of text, and includes an image which displays the response
+times in graphical form. The image is generated by constructing a URL for the
+`Google Chart API `_, which is
+then served by the `chart.apis.google.com` internet server.
+
+When you view this page, several parties may learn information about your
+Tahoe activities. The request will typically include a "Referer" header,
+revealing the URL of the mapupdate status page (which is typically something
+like "http://127.0.0.1:3456/status/mapupdate-123") to network observers and
+the Google API server. The image returned by this server is typically a PNG
+file, but either the server or a MitM attacker could replace it with
+something malicious that attempts to exploit a browser rendering bug or
+buffer overflow. (Note that browsers do not execute scripts inside IMG tags,
+even for SVG images).
+
+In addition, if your Tahoe node connects to its grid over Tor or i2p, but the
+web browser you use to access your node does not, then this image link may
+reveal your use of Tahoe (and that grid) to the outside world. It is not
+recommended to use a browser in this way, because other links in Tahoe-stored
+content would reveal even more information (e.g. an attacker could store an
+HTML file with unique CSS references into a shared Tahoe grid, then send your
+pseudonym a message with its URI, then observe your browser loading that CSS
+file, and thus link the source IP address of your web client to that
+pseudonym).
+
+A future version of Tahoe will probably replace the Google Chart API link
+(which was deprecated by Google in April 2012) with client-side JavaScript
+using d3.js, removing the information leak but requiring JS to see the chart.
+See ticket `#1942`_ for details.
+
+.. _#1942: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1942
+
+----
+
 Known Issues in Tahoe-LAFS v1.9.0, released 31-Oct-2011
 =======================================================
@@ -318,7 +361,7 @@
 A person could learn the storage index of a file in several ways:

-1. By being granted the authority to read the immutable file—i.e. by being
+1. By being granted the authority to read the immutable file: i.e. by being
    granted a read capability to the file. They can determine the file's
    storage index from its read capability.

@@ -347,17 +390,17 @@
 longer vulnerable to this problem.

 Note that the issue is local to each storage server independently of other
-storage servers—when you upgrade a storage server then that particular
+storage servers: when you upgrade a storage server then that particular
 storage server can no longer be tricked into deleting its shares of the
 target file.

 If you can't immediately upgrade your storage server to a version of
 Tahoe-LAFS that eliminates this vulnerability, then you could temporarily
 shut down your storage server. This would of course negatively impact
-availability—clients would not be able to upload or download shares to that
-particular storage server while it was shut down—but it would protect the
-shares already stored on that server from being deleted as long as the server
-is shut down.
+availability -- clients would not be able to upload or download shares to
+that particular storage server while it was shut down -- but it would protect
+the shares already stored on that server from being deleted as long as the
+server is shut down.
If the servers that store shares of your file are running a version of Tahoe-LAFS with this vulnerability, then you should think about whether diff -Nru tahoe-lafs-1.9.2/docs/known_issues.rst.orig tahoe-lafs-1.10.0/docs/known_issues.rst.orig --- tahoe-lafs-1.9.2/docs/known_issues.rst.orig 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/known_issues.rst.orig 1970-01-01 00:00:00.000000000 +0000 @@ -1,382 +0,0 @@ - -See also cautions.rst_. - -.. _cautions.rst: file:cautions.rst - -============ -Known Issues -============ - -Below is a list of known issues in recent releases of Tahoe-LAFS, and how to -manage them. The current version of this file can be found at -https://tahoe-lafs.org/source/tahoe-lafs/trunk/docs/known_issues.rst . - -If you've been using Tahoe-LAFS since v1.1 (released 2008-06-11) or if you're -just curious about what sort of mistakes we've made in the past, then you might -want to read `the "historical known issues" document`_. - -.. _the "historical known issues" document: historical/historical_known_issues.txt - - -Known Issues in Tahoe-LAFS v1.9.1, released 12-Jan-2012 -======================================================= - - * `Unauthorized access by JavaScript in unrelated files`_ - * `Disclosure of file through embedded hyperlinks or JavaScript in that file`_ - * `Command-line arguments are leaked to other local users`_ - * `Capabilities may be leaked to web browser phishing filter / "safe browsing" servers`_ - * `Known issues in the FTP and SFTP frontends`_ - * `Traffic analysis based on sizes of files/directories, storage indices, and timing`_ - ----- - -Unauthorized access by JavaScript in unrelated files ----------------------------------------------------- - -If you view a file stored in Tahoe-LAFS through a web user interface, -JavaScript embedded in that file can, in some circumstances, access other -files or directories stored in Tahoe-LAFS that you view through the same -web user interface. Such a script would be able to send the contents of -those other files or directories to the author of the script, and if you -have the ability to modify the contents of those files or directories, -then that script could modify or delete those files or directories. - -This attack is known to be possible when an attacking tab or window could -reach a tab or window containing a Tahoe URI by navigating back or forward -in the history, either from itself or from any frame with a known name (as -specified by the "target" attribute of an HTML link). It might be possible -in other cases depending on the browser. - -*how to manage it* - -For future versions of Tahoe-LAFS, we are considering ways to close off -this leakage of authority while preserving ease of use -- the discussion -of this issue is ticket `#615`_. - -For the present, either do not view files stored in Tahoe-LAFS through a -web user interface, or turn off JavaScript in your web browser before -doing so, or limit your viewing to files which you know don't contain -malicious JavaScript. - -.. _#615: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/615 - - ----- - -Disclosure of file through embedded hyperlinks or JavaScript in that file -------------------------------------------------------------------------- - -If there is a file stored on a Tahoe-LAFS storage grid, and that file -gets downloaded and displayed in a web browser, then JavaScript or -hyperlinks within that file can leak the capability to that file to a -third party, which means that third party gets access to the file. 
- -If there is JavaScript in the file, then it could deliberately leak -the capability to the file out to some remote listener. - -If there are hyperlinks in the file, and they get followed, then -whichever server they point to receives the capability to the -file. Note that IMG tags are typically followed automatically by web -browsers, so being careful which hyperlinks you click on is not -sufficient to prevent this from happening. - -*how to manage it* - -For future versions of Tahoe-LAFS, we are considering ways to close off -this leakage of authority while preserving ease of use -- the discussion -of this issue is ticket `#127`_. - -For the present, a good work-around is that if you want to store and -view a file on Tahoe-LAFS and you want that file to remain private, then -remove from that file any hyperlinks pointing to other people's servers -and remove any JavaScript unless you are sure that the JavaScript is not -written to maliciously leak access. - -.. _#127: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/127 - - ----- - -Command-line arguments are leaked to other local users ------------------------------------------------------- - -Remember that command-line arguments are visible to other users (through -the 'ps' command, or the windows Process Explorer tool), so if you are -using a Tahoe-LAFS node on a shared host, other users on that host will -be able to see (and copy) any caps that you pass as command-line -arguments. This includes directory caps that you set up with the "tahoe -add-alias" command. - -*how to manage it* - -As of Tahoe-LAFS v1.3.0 there is a "tahoe create-alias" command that does -the following technique for you. - -Bypass add-alias and edit the NODEDIR/private/aliases file directly, by -adding a line like this: - - fun: URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa - -By entering the dircap through the editor, the command-line arguments -are bypassed, and other users will not be able to see them. Once you've -added the alias, if you use that alias instead of a cap itself on the -command-line, then no secrets are passed through the command line. Then -other processes on the system can still see your filenames and other -arguments you type there, but not the caps that Tahoe-LAFS uses to permit -access to your files and directories. - - ----- - -Capabilities may be leaked to web browser phishing filter / "safe browsing" servers ------------------------------------------------------------------------------------ - -Firefox, Internet Explorer, and Chrome include a "phishing filter" or -"safe browing" component, which is turned on by default, and which sends -any URLs that it deems suspicious to a central server. - -Microsoft gives `a brief description of their filter's operation`_. Firefox -and Chrome both use Google's `"safe browsing API"`_ (`specification`_). - -This of course has implications for the privacy of general web browsing -(especially in the cases of Firefox and Chrome, which send your main -personally identifying Google cookie along with these requests without your -explicit consent, as described in `Firefox bugzilla ticket #368255`_. - -The reason for documenting this issue here, though, is that when using the -Tahoe-LAFS web user interface, it could also affect confidentiality and integrity -by leaking capabilities to the filter server. 
- -Since IE's filter sends URLs by SSL/TLS, the exposure of caps is limited to -the filter server operators (or anyone able to hack the filter server) rather -than to network eavesdroppers. The "safe browsing API" protocol used by -Firefox and Chrome, on the other hand, is *not* encrypted, although the -URL components are normally hashed. - -Opera also has a similar facility that is disabled by default. A previous -version of this file stated that Firefox had abandoned their phishing -filter; this was incorrect. - -.. _a brief description of their filter's operation: https://blogs.msdn.com/ie/archive/2005/09/09/463204.aspx -.. _"safe browsing API": https://code.google.com/apis/safebrowsing/ -.. _specification: https://code.google.com/p/google-safe-browsing/wiki/Protocolv2Spec -.. _Firefox bugzilla ticket #368255: https://bugzilla.mozilla.org/show_bug.cgi?id=368255 - - -*how to manage it* - -If you use any phishing filter or "safe browsing" feature, consider either -disabling it, or not using the WUI via that browser. Phishing filters have -`very limited effectiveness`_ , and phishing or malware attackers have learnt -how to bypass them. - -.. _very limited effectiveness: http://lorrie.cranor.org/pubs/ndss-phish-tools-final.pdf - -To disable the filter in IE7 or IE8: -++++++++++++++++++++++++++++++++++++ - -- Click Internet Options from the Tools menu. - -- Click the Advanced tab. - -- If an "Enable SmartScreen Filter" option is present, uncheck it. - If a "Use Phishing Filter" or "Phishing Filter" option is present, - set it to Disable. - -- Confirm (click OK or Yes) out of all dialogs. - -If you have a version of IE that splits the settings between security -zones, do this for all zones. - -To disable the filter in Firefox: -+++++++++++++++++++++++++++++++++ - -- Click Options from the Tools menu. - -- Click the Security tab. - -- Uncheck both the "Block reported attack sites" and "Block reported - web forgeries" options. - -- Click OK. - -To disable the filter in Chrome: -++++++++++++++++++++++++++++++++ - -- Click Options from the Tools menu. - -- Click the "Under the Hood" tab and find the "Privacy" section. - -- Uncheck the "Enable phishing and malware protection" option. - -- Click Close. - - ----- - -Known issues in the FTP and SFTP frontends ------------------------------------------- - -These are documented in `docs/frontends/FTP-and-SFTP.rst`_ and on `the SftpFrontend page`_ on the wiki. - -.. _docs/frontends/FTP-and-SFTP.rst: frontends/FTP-and-SFTP.rst -.. _the SftpFrontend page: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend - - ----- - -Traffic analysis based on sizes of files/directories, storage indices, and timing ---------------------------------------------------------------------------------- - -Files and directories stored by Tahoe-LAFS are encrypted, but the ciphertext -reveals the exact size of the original file or directory representation. -This information is available to passive eavesdroppers and to server operators. - -For example, a large data set with known file sizes could probably be -identified with a high degree of confidence. - -Uploads and downloads of the same file or directory can be linked by server -operators, even without making assumptions based on file size. Anyone who -knows the introducer furl for a grid may be able to act as a server operator. 
-This implies that if such an attacker knows which file/directory is being -accessed in a particular request (by some other form of surveillance, say), -then they can identify later or earlier accesses of the same file/directory. - -Observing requests during a directory traversal (such as a deep-check -operation) could reveal information about the directory structure, i.e. -which files and subdirectories are linked from a given directory. - -Attackers can combine the above information with inferences based on timing -correlations. For instance, two files that are accessed close together in -time are likely to be related even if they are not linked in the directory -structure. Also, users that access the same files may be related to each other. - - ----- - -Known Issues in Tahoe-LAFS v1.9.0, released 31-Oct-2011 -======================================================= - - -Integrity Failure during Mutable Downloads ------------------------------------------- - -Under certain circumstances, the integrity-verification code of the mutable -downloader could be bypassed. Clients who receive carefully crafted shares -(from attackers) will emit incorrect file contents, and the usual -share-corruption errors would not be raised. This only affects mutable files -(not immutable), and only affects downloads that use doctored shares. It is -not persistent: the threat is resolved once you upgrade your client to a -version without the bug. However, read-modify-write operations (such as -directory manipulations) performed by vulnerable clients could cause the -attacker's modifications to be written back out to the mutable file, making -the corruption permanent. - -The attacker's ability to manipulate the file contents is limited. They can -modify FEC-encoded ciphertext in all but one share. This gives them the -ability to blindly flip bits in roughly 2/3rds of the file (for the default -k=3 encoding parameter). Confidentiality remains intact, unless the attacker -can deduce the file's contents by observing your reactions to corrupted -downloads. - -This bug was introduced in 1.9.0, as part of the MDMF-capable downloader, and -affects both SDMF and MDMF files. It was not present in 1.8.3. - -*how to manage it* - -There are three options: - -* Upgrade to 1.9.1, which fixes the bug -* Downgrade to 1.8.3, which does not contain the bug -* If using 1.9.0, do not trust the contents of mutable files (whether SDMF or - MDMF) that the 1.9.0 client emits, and do not modify directories (which - could write the corrupted data back into place, making the damage - persistent) - - -.. _#1654: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1654 - ----- - -Known Issues in Tahoe-LAFS v1.8.2, released 30-Jan-2011 -======================================================= - - -Unauthorized deletion of an immutable file by its storage index ---------------------------------------------------------------- - -Due to a flaw in the Tahoe-LAFS storage server software in v1.3.0 through -v1.8.2, a person who knows the "storage index" that identifies an immutable -file can cause the server to delete its shares of that file. - -If an attacker can cause enough shares to be deleted from enough storage -servers, this deletes the file. - -This vulnerability does not enable anyone to read file contents without -authorization (confidentiality), nor to change the contents of a file -(integrity). - -A person could learn the storage index of a file in several ways: - -1. By being granted the authority to read the immutable file—i.e. 
by being - granted a read capability to the file. They can determine the file's - storage index from its read capability. - -2. By being granted a verify capability to the file. They can determine the - file's storage index from its verify capability. This case probably - doesn't happen often because users typically don't share verify caps. - -3. By operating a storage server, and receiving a request from a client that - has a read cap or a verify cap. If the client attempts to upload, - download, or verify the file with their storage server, even if it doesn't - actually have the file, then they can learn the storage index of the file. - -4. By gaining read access to an existing storage server's local filesystem, - and inspecting the directory structure that it stores its shares in. They - can thus learn the storage indexes of all files that the server is holding - at least one share of. Normally only the operator of an existing storage - server would be able to inspect its local filesystem, so this requires - either being such an operator of an existing storage server, or somehow - gaining the ability to inspect the local filesystem of an existing storage - server. - -*how to manage it* - -Tahoe-LAFS version v1.8.3 or newer (except v1.9a1) no longer has this flaw; -if you upgrade a storage server to a fixed release then that server is no -longer vulnerable to this problem. - -Note that the issue is local to each storage server independently of other -storage servers—when you upgrade a storage server then that particular -storage server can no longer be tricked into deleting its shares of the -target file. - -If you can't immediately upgrade your storage server to a version of -Tahoe-LAFS that eliminates this vulnerability, then you could temporarily -shut down your storage server. This would of course negatively impact -availability—clients would not be able to upload or download shares to that -particular storage server while it was shut down—but it would protect the -shares already stored on that server from being deleted as long as the server -is shut down. - -If the servers that store shares of your file are running a version of -Tahoe-LAFS with this vulnerability, then you should think about whether -someone can learn the storage indexes of your files by one of the methods -described above. A person can not exploit this vulnerability unless they have -received a read cap or verify cap, or they control a storage server that has -been queried about this file by a client that has a read cap or a verify cap. - -Tahoe-LAFS does not currently have a mechanism to limit which storage servers -can connect to your grid, but it does have a way to see which storage servers -have been connected to the grid. The Introducer's front page in the Web User -Interface has a list of all storage servers that the Introducer has ever seen -and the first time and the most recent time that it saw them. Each Tahoe-LAFS -gateway maintains a similar list on its front page in its Web User Interface, -showing all of the storage servers that it learned about from the Introducer, -when it first connected to that storage server, and when it most recently -connected to that storage server. These lists are stored in memory and are -reset to empty when the process is restarted. - -See ticket `#1528`_ for technical details. - -.. 
_#1528: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1528 diff -Nru tahoe-lafs-1.9.2/docs/known_issues.rst.rej tahoe-lafs-1.10.0/docs/known_issues.rst.rej --- tahoe-lafs-1.9.2/docs/known_issues.rst.rej 2012-06-21 23:32:19.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/known_issues.rst.rej 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ ---- docs/known_issues.rst -+++ docs/known_issues.rst -@@ -14,10 +14,9 @@ - .. _the "historical known issues" document: historical/historical_known_issues.txt - - --Known Issues in Tahoe-LAFS v1.9.0, released 31-Oct-2011 -+Known Issues in Tahoe-LAFS v1.9.1, released 12-Jan-2012 - ======================================================= - -- * `Integrity Failure during Mutable Downloads`_ - * `Potential unauthorized access by JavaScript in unrelated files`_ - * `Potential disclosure of file through embedded hyperlinks or JavaScript in that file`_ - * `Command-line arguments are leaked to other local users`_ -@@ -27,46 +26,6 @@ - - ---- - --Integrity Failure during Mutable Downloads ---------------------------------------------------------------- -- --Under certain circumstances, the integrity-verification code of the mutable --downloader could be bypassed. Clients who receive carefully crafted shares --(from attackers) will emit incorrect file contents, and the usual --share-corruption errors would not be raised. This only affects mutable files --(not immutable), and only affects downloads that use doctored shares. It is --not persistent: the threat is resolved once you upgrade your client to a --version without the bug. However, read-modify-write operations (such as --directory manipulations) performed by vulnerable clients could cause the --attacker's modifications to be written back out to the mutable file, making --the corruption permanent. -- --The attacker's ability to manipulate the file contents is limited. They can --modify FEC-encoded ciphertext in all but one share. This gives them the --ability to blindly flip bits in roughly 2/3rds of the file (for the default --k=3 encoding parameter). Confidentiality remains intact, unless the attacker --can deduce the file's contents by observing your reactions to corrupted --downloads. -- --This bug was introduced in 1.9.0, as part of the MDMF-capable downloader, and --affects both SDMF and MDMF files. It was not present in 1.8.3. -- --*how to manage it* -- --There are three options: -- --* Upgrade to 1.9.1, which fixes the bug --* Downgrade to 1.8.3, which does not contain the bug --* If using 1.9.0, do not trust the contents of mutable files (whether SDMF or -- MDMF) that the 1.9.0 client emits, and do not modify directories (which -- could write the corrupted data back into place, making the damage -- persistent) -- -- --.. _#1654: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1654 -- ------ -- - Potential unauthorized access by JavaScript in unrelated files - -------------------------------------------------------------- - diff -Nru tahoe-lafs-1.9.2/docs/logging.rst tahoe-lafs-1.10.0/docs/logging.rst --- tahoe-lafs-1.9.2/docs/logging.rst 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/logging.rst 2013-09-03 15:38:27.000000000 +0000 @@ -24,14 +24,18 @@ went wrong. The Foolscap logging system is documented at -``_. +``__. The Foolscap distribution includes a utility named "``flogtool``" that is -used to get access to many Foolscap logging features. However, using this -command directly on Tahoe log files may fail, due to use of an incorrect -PYTHONPATH. 
Installing Foolscap v0.6.1 or later and then running
-``bin/tahoe @flogtool`` from the root of a Tahoe-LAFS source distribution
-may avoid this problem (but only on Unix, not Windows).
+used to get access to many Foolscap logging features. This command only
+works when foolscap and its dependencies are installed correctly.
+Tahoe-LAFS v1.10.0 and later include a ``tahoe debug flogtool`` command
+that can be used even when foolscap is not installed; to use this, prefix
+all of the example commands below with ``tahoe debug``.
+
+For earlier versions (back to Tahoe-LAFS v1.8.2), installing Foolscap v0.6.1
+or later and then running ``bin/tahoe @flogtool`` from the root of a
+Tahoe-LAFS source distribution may work (but only on Unix, not Windows).

 Realtime Logging
@@ -180,7 +184,7 @@
 command, and start it with "``tahoe start``". Then copy the contents of the
 ``log_gatherer.furl`` file it creates into the ``BASEDIR/tahoe.cfg`` file
 (under the key ``log_gatherer.furl`` of the section ``[node]``) of all nodes
-that should be sending it log events. (See ``_.)
+that should be sending it log events. (See ``__.)

 The "``flogtool filter``" command, described above, is useful to cut down the
 potentially large flogfiles into a more focussed form.
@@ -264,13 +268,17 @@
 With ``FLOGTOTWISTED=1``, sufficiently-important log events will be written
 into ``_trial_temp/test.log``, which may give you more ideas about why the
-test is failing. Note, however, that ``_trial_temp/log.out`` will not receive
-messages below the ``level=OPERATIONAL`` threshold, due to this issue:
-``_
+test is failing.
+By default, ``_trial_temp/test.log`` will not receive messages below the
+``level=OPERATIONAL`` threshold. You can change the threshold via the ``FLOGLEVEL``
+variable, e.g.::

-If that isn't enough, look at the detailed foolscap logging messages instead,
-by running the tests like this::
+  make test FLOGLEVEL=10 FLOGTOTWISTED=1
+
+(The level numbers are listed in src/allmydata/util/log.py.)
+
+To look at the detailed foolscap logging messages, run the tests like this::

   make test FLOGFILE=flog.out.bz2 FLOGLEVEL=1 FLOGTOTWISTED=1
diff -Nru tahoe-lafs-1.9.2/docs/man/tahoe.1 tahoe-lafs-1.10.0/docs/man/tahoe.1
--- tahoe-lafs-1.9.2/docs/man/tahoe.1	2012-05-14 02:24:30.000000000 +0000
+++ tahoe-lafs-1.10.0/docs/man/tahoe.1	2013-09-03 15:38:27.000000000 +0000
@@ -133,7 +133,7 @@
 Display help and exit
 .RS
 .RE
-.SS USING THE FILSYSTEM
+.SS USING THE FILESYSTEM
 .TP
 .B \f[B]mkdir\f[]
 Create a new directory.
@@ -287,4 +287,4 @@
 tahoe-dev mailing list:
 .SH COPYRIGHT
 .PP
-Copyright \@ 2006\[en]2012 The Tahoe-LAFS Software Foundation
+Copyright \@ 2006\[en]2013 The Tahoe-LAFS Software Foundation
diff -Nru tahoe-lafs-1.9.2/docs/network-and-reliance-topology.svg tahoe-lafs-1.10.0/docs/network-and-reliance-topology.svg
--- tahoe-lafs-1.9.2/docs/network-and-reliance-topology.svg	2012-05-14 02:07:12.000000000 +0000
+++ tahoe-lafs-1.10.0/docs/network-and-reliance-topology.svg	2013-09-03 15:38:27.000000000 +0000
[SVG markup diff omitted as unrecoverable: an Inkscape re-save of the network topology diagram that rewrites path, connector, and style attributes and revises the diagram's text labels ("Tahoe-LAFS network topology", "Tahoe-LAFS storage servers", "Tahoe-LAFS client", "Tahoe-LAFS gateway", the "over TCP/SSL" and "over HTTP(S) or (S)FTP" connection labels, the red/black reliance legend, and the lists of client frontends).]
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6029" + d="m 111.5536,301.25626 c -0.045,-0.0423 -0.36029,-0.17641 -0.70059,-0.29795 -2.98664,-1.06672 -5.29595,-2.45378 -6.47182,-3.88726 l -0.37827,-0.46114 0,-2.86361 0,-2.86362 0.81759,-0.81133 c 0.96413,-0.95675 1.99468,-1.6618 3.51344,-2.40369 1.12874,-0.55138 3.15058,-1.35624 3.40691,-1.35624 0.0783,0 0.68619,0.15689 1.35084,0.34865 1.87621,0.5413 5.4076,1.31736 6.08955,1.33824 0.13499,0.004 -0.15231,0.0797 -0.63845,0.16791 -5.27086,0.95653 -9.67144,2.6835 -11.3361,4.44875 -0.73345,0.77777 -0.82416,1.25546 -0.3689,1.94278 0.84622,1.27756 3.69371,2.76474 7.21735,3.76945 1.3114,0.37393 3.12178,0.80159 4.16219,0.98323 0.72873,0.12722 0.69,0.19167 -0.20487,0.34097 -1.75461,0.29273 -6.00723,1.37494 -6.27448,1.59674 -0.0584,0.0485 -0.13774,0.052 -0.18439,0.008 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6031" + d="m 110.89434,285.8727 c -2.97974,-1.05932 -5.15513,-2.36041 -6.44977,-3.85759 l -0.39746,-0.45963 -0.0442,-1.26806 -0.0442,-1.26804 -0.41984,-0.0269 -0.41985,-0.0269 0,-0.41853 c 0,-2.04404 1.89922,-4.27268 4.94131,-5.7984 7.8506,-3.93735 23.33188,-5.37648 36.60122,-3.40242 6.96201,1.03573 12.37864,2.99776 15.01528,5.43888 1.14578,1.06082 1.83041,2.37849 1.93171,3.71784 l 0.0491,0.64909 -0.5431,0 -0.54308,0 0,1.29299 0,1.293 -0.7292,0.72794 c -1.34862,1.34626 -3.49442,2.54869 -6.24118,3.49732 l -0.73899,0.25523 -1.02877,-0.30443 c -1.36151,-0.40289 -3.30364,-0.86466 -4.82947,-1.14827 -0.68059,-0.12651 -1.2971,-0.25257 -1.37002,-0.28014 -0.0729,-0.0276 0.50381,-0.18009 1.28163,-0.33894 4.3054,-0.87926 7.47825,-2.0111 9.54594,-3.40527 0.6798,-0.45836 1.41207,-1.19087 1.54806,-1.54855 0.22369,-0.58836 0.003,-1.17927 -0.6779,-1.81513 -3.16521,-2.95581 -12.32714,-5.13291 -22.61369,-5.37357 -7.26643,-0.17 -13.812,0.44413 -19.25547,1.80661 -4.75881,1.19111 -8.29591,3.03876 -8.81221,4.60318 -0.14583,0.44185 0.0807,0.98274 0.65011,1.55212 1.69707,1.69707 5.63092,3.23496 10.88895,4.25688 l 1.06066,0.20615 -0.88389,0.15531 c -1.89698,0.33331 -5.08622,1.10202 -6.19472,1.49312 -0.33577,0.11847 -0.39379,0.10916 -1.27696,-0.20481 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6033" + d="m 104.00292,301.29849 0,-4.46491 0.37565,0.41213 c 1.26058,1.383 2.85364,2.38503 5.54637,3.48865 0.80212,0.32875 1.39653,0.59893 1.3209,0.6004 -0.22966,0.004 -1.64718,0.59841 -2.95609,1.2386 -1.79001,0.87551 -3.0005,1.73092 -3.91118,2.76393 l -0.37565,0.4261 0,-4.4649 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6035" + d="m 104.00292,286.20941 0,-4.45445 0.7734,0.75827 c 0.85461,0.83789 2.02948,1.66147 3.23091,2.26487 0.86357,0.43371 2.45502,1.0994 3.04466,1.27357 0.43559,0.12865 0.47418,0.2162 0.13258,0.30072 -0.3889,0.0962 -2.28836,0.90937 -3.21955,1.37828 -1.40705,0.70853 -2.17903,1.25122 -3.34651,2.35256 l -0.61549,0.58063 0,-4.45445 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6037" + d="m 104.00292,316.2336 0,-4.3939 0.7734,0.7244 c 1.34707,1.26172 3.30309,2.41097 5.56846,3.27175 0.5469,0.20781 0.99437,0.40523 0.99437,0.43872 0,0.0335 -0.16904,0.1105 -0.37565,0.17113 -0.56633,0.16617 -2.0255,0.76261 -2.74379,1.12152 -1.39667,0.69789 -2.56351,1.50067 -3.59351,2.47231 l -0.62328,0.58798 0,-4.39391 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6039" + d="m 159.86435,319.89194 c -1.31096,-1.22228 -3.06266,-2.24617 -5.38968,-3.15032 -0.56016,-0.21764 -1.04794,-0.42322 -1.08396,-0.45684 -0.036,-0.0336 0.36174,-0.21554 0.88388,-0.40427 2.03146,-0.73426 4.16877,-1.96276 5.545,-3.1872 l 0.75187,-0.66894 0,4.24667 c 0,2.33567 -0.01,4.24493 -0.0221,4.24282 -0.0122,-0.002 -0.3204,-0.28198 -0.68501,-0.62192 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6041" + d="m 159.90279,304.88522 c -1.21447,-1.16855 -3.39408,-2.44074 -5.59677,-3.26672 -0.50488,-0.18932 -0.88274,-0.3653 -0.83969,-0.39106 0.043,-0.0258 0.51578,-0.21725 1.05053,-0.42553 2.14873,-0.83689 4.19012,-2.0469 5.41296,-3.20849 l 0.64164,-0.60949 0,4.25388 c 0,2.33963 -0.01,4.25261 -0.0221,4.25107 -0.0122,-0.002 -0.30311,-0.27319 -0.64657,-0.60367 l 0,1e-5 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6043" + d="m 159.95274,290.00706 c -1.46246,-1.37246 -2.90851,-2.23054 -5.40185,-3.20541 l -1.3802,-0.53963 1.24762,-0.4807 c 2.59064,-0.99815 4.06312,-1.86608 5.59152,-3.29583 l 0.56163,-0.52537 0,4.29696 c 0,2.36334 -0.01,4.29522 
-0.0221,4.29306 -0.0122,-0.002 -0.28063,-0.24654 -0.59662,-0.54308 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> @@ -1081,135 +1201,148 @@ inkscape:export-xdpi="151" inkscape:export-filename="/home/zooko/playground/allmydata/tahoe/trunk/trunk/docs/network-and-reliance-topology.png" transform="matrix(1.8356478,0,0,1.7055983,609.62935,-243.81548)" - d="M -245,306.36218 A 15,5 0 1 1 -275,306.36218 A 15,5 0 1 1 -245,306.36218 z" + d="m -245,306.36218 c 0,2.76143 -6.71573,5 -15,5 -8.28427,0 -15,-2.23857 -15,-5 0,-2.76142 6.71573,-5 15,-5 8.28427,0 15,2.23858 15,5 z" sodipodi:ry="5" sodipodi:rx="15" sodipodi:cy="306.36218" sodipodi:cx="-260" - id="path6013" - style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;marker:none;marker-start:none;marker-mid:none;marker-end:none;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + id="path6047" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" sodipodi:type="arc" /> + id="path6049" + d="m 129.50211,285.26188 c -13.87895,0.43698 -24.72388,4.11476 -24.72388,8.528 0,4.70745 12.33555,8.52799 27.53472,8.52799 15.19916,0 27.53471,-3.82054 27.53471,-8.52799 0,-4.70746 -12.33555,-8.528 -27.53471,-8.528 -0.94995,0 -1.88558,-0.0291 -2.81084,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:3.53885722;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6055" + d="m 104.51272,278.92156 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path6057" + d="m 160.20443,278.81929 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path6059" + d="m 128.84004,333.94646 c -9.55378,-0.35398 -18.0393,-2.28297 -22.28863,-5.06686 -1.79809,-1.17799 -2.86957,-2.43124 -3.28994,-3.84803 -0.0783,-0.26405 -0.14244,-0.59219 -0.14244,-0.72921 0,-0.24002 0.0162,-0.24912 0.44195,-0.24912 l 0.44194,0 0,-1.59565 0,-1.59565 0.7734,-0.78073 c 1.37242,-1.38543 3.5568,-2.61944 6.28033,-3.54794 0.57165,-0.19487 0.78387,-0.23124 0.95966,-0.1644 0.89212,0.33918 4.16622,1.14142 5.95197,1.45837 0.46183,0.082 0.89198,0.17698 0.95589,0.21113 0.0639,0.0341 -0.27417,0.13069 -0.7513,0.21454 -2.16366,0.38022 -5.27597,1.26641 -7.31986,2.08423 -1.26485,0.5061 -2.73088,1.37245 
-3.43155,2.02784 -0.61328,0.57365 -0.88935,1.07747 -0.83056,1.51574 0.10121,0.75461 1.03778,1.65011 2.65467,2.53826 3.03786,1.66869 8.27963,3.00706 14.24698,3.63764 3.43631,0.36313 4.34573,0.40478 8.83883,0.40478 4.49781,0 5.42336,-0.0425 8.83461,-0.40564 6.70706,-0.71397 12.03545,-2.18596 15.15792,-4.18741 0.9919,-0.63579 1.77268,-1.55176 1.77268,-2.07959 0,-1.83648 -5.36139,-4.39355 -11.59973,-5.53239 -0.47331,-0.0864 -0.84731,-0.17033 -0.83113,-0.18651 0.0162,-0.0162 0.72162,-0.16617 1.56765,-0.33331 2.47542,-0.48906 5.64829,-1.3148 5.64829,-1.46998 0,-0.0398 -0.27842,-0.15364 -0.61871,-0.25293 -0.3403,-0.0993 -0.8872,-0.25861 -1.21534,-0.35405 -0.92376,-0.26868 -2.84828,-0.70143 -4.29994,-0.96689 l -1.31683,-0.24079 0.65392,-0.11255 c 3.18241,-0.5477 6.84005,-1.65813 8.9323,-2.71177 2.30807,-1.16233 3.37088,-2.35285 2.99142,-3.3509 -0.23179,-0.60965 -1.60641,-1.72631 -2.97062,-2.41315 -1.9679,-0.99078 -5.64494,-2.1244 -8.55478,-2.6374 l -0.87431,-0.15414 1.31625,-0.24145 c 2.43646,-0.44695 6.04503,-1.38809 6.04503,-1.57659 0,-0.16039 -3.77217,-1.13994 -6.00007,-1.55808 l -1.27132,-0.2386 1.4039,-0.28649 c 4.97987,-1.01622 8.81978,-2.58023 10.32405,-4.20502 0.83053,-0.89707 0.84809,-1.43461 0.0749,-2.29454 -1.05912,-1.17799 -3.67536,-2.47617 -6.82959,-3.38885 -1.31483,-0.38044 -3.76303,-0.95533 -4.59619,-1.07928 -0.69387,-0.10323 -0.63712,-0.16815 0.28981,-0.33148 1.49071,-0.26268 3.64552,-0.75469 5.12759,-1.17079 l 1.39572,-0.39186 0.7409,0.2615 c 2.77417,0.97916 4.85102,2.15234 6.21247,3.50932 l 0.7292,0.7268 0,2.96706 0,2.96706 -0.62748,0.66057 c -1.067,1.12327 -2.90203,2.24749 -5.19388,3.18202 -0.5172,0.21089 -1.10939,0.43336 -1.316,0.49438 -0.20661,0.061 -0.37565,0.14124 -0.37565,0.17825 0,0.037 0.20882,0.135 0.46404,0.21774 2.52291,0.81791 5.13472,2.28602 6.40815,3.60208 l 0.64082,0.66226 0,3.01998 0,3.01998 -0.68501,0.68288 c -1.34606,1.34185 -3.86365,2.74195 -6.45235,3.58832 -0.25522,0.0834 -0.46404,0.18199 -0.46404,0.21901 0,0.037 0.16904,0.1169 0.37565,0.17754 0.70041,0.20554 2.58155,0.99039 3.38086,1.41053 1.28631,0.67613 2.3365,1.41391 3.11569,2.18881 l 0.7292,0.72518 0,1.72989 0,1.72989 0.48614,0 c 0.57517,0 0.59074,0.0469 0.30199,0.90876 -0.55901,1.66855 -2.08207,3.18021 -4.50044,4.46673 -5.78113,3.07547 -16.66459,4.76162 -28.01911,4.34094 l 0,10e-6 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6061" + d="m 111.42754,316.1251 c -3.10778,-1.11627 -5.21391,-2.29562 -6.60703,-3.69969 l -0.81759,-0.82402 0,-2.81338 0,-2.81337 0.41984,-0.50506 c 1.06123,-1.27659 2.96244,-2.48815 5.5783,-3.55478 1.61505,-0.65855 1.44972,-0.64809 3.01747,-0.19098 1.56699,0.4569 2.85288,0.76221 4.68458,1.11228 1.15542,0.22082 1.24682,0.25126 0.92808,0.3091 -6.05073,1.09801 -10.93674,3.17996 -11.85169,5.05006 -0.21536,0.44018 -0.22853,0.52104 -0.13545,0.83173 0.0568,0.1894 0.3084,0.56876 0.55967,0.84369 1.57122,1.71916 5.74995,3.388 10.98553,4.38723 0.58336,0.11134 0.96122,0.21541 0.83969,0.23127 -1.27847,0.16687 -4.13999,0.80568 -6.02435,1.34486 -1.25299,0.35852 -1.34298,0.37513 -1.57705,0.29106 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6063" + d="m 111.5536,301.25626 c -0.045,-0.0423 -0.36029,-0.17641 -0.70059,-0.29795 -2.98664,-1.06672 -5.29595,-2.45378 -6.47182,-3.88726 l -0.37827,-0.46114 0,-2.86361 0,-2.86362 0.81759,-0.81133 c 0.96413,-0.95675 1.99468,-1.6618 3.51344,-2.40369 1.12874,-0.55138 3.15058,-1.35624 3.40691,-1.35624 0.0783,0 0.68619,0.15689 1.35084,0.34865 1.87621,0.5413 5.4076,1.31736 6.08955,1.33824 0.13499,0.004 -0.15231,0.0797 -0.63845,0.16791 -5.27086,0.95653 -9.67144,2.6835 -11.3361,4.44875 -0.73345,0.77777 -0.82416,1.25546 -0.3689,1.94278 0.84622,1.27756 3.69371,2.76474 7.21735,3.76945 1.3114,0.37393 3.12178,0.80159 4.16219,0.98323 0.72873,0.12722 0.69,0.19167 -0.20487,0.34097 -1.75461,0.29273 -6.00723,1.37494 -6.27448,1.59674 -0.0584,0.0485 -0.13774,0.052 -0.18439,0.008 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6065" + d="m 110.89434,285.8727 c -2.97974,-1.05932 -5.15513,-2.36041 -6.44977,-3.85759 l -0.39746,-0.45963 -0.0442,-1.26806 -0.0442,-1.26804 -0.41984,-0.0269 -0.41985,-0.0269 0,-0.41853 c 0,-2.04404 1.89922,-4.27268 4.94131,-5.7984 7.8506,-3.93735 23.33188,-5.37648 36.60122,-3.40242 6.96201,1.03573 12.37864,2.99776 15.01528,5.43888 1.14578,1.06082 1.83041,2.37849 1.93171,3.71784 l 0.0491,0.64909 -0.5431,0 -0.54308,0 0,1.29299 0,1.293 -0.7292,0.72794 c -1.34862,1.34626 -3.49442,2.54869 -6.24118,3.49732 l -0.73899,0.25523 -1.02877,-0.30443 c -1.36151,-0.40289 -3.30364,-0.86466 -4.82947,-1.14827 -0.68059,-0.12651 -1.2971,-0.25257 -1.37002,-0.28014 -0.0729,-0.0276 0.50381,-0.18009 1.28163,-0.33894 4.3054,-0.87926 7.47825,-2.0111 9.54594,-3.40527 0.6798,-0.45836 1.41207,-1.19087 1.54806,-1.54855 0.22369,-0.58836 0.003,-1.17927 -0.6779,-1.81513 -3.16521,-2.95581 -12.32714,-5.13291 -22.61369,-5.37357 -7.26643,-0.17 -13.812,0.44413 -19.25547,1.80661 -4.75881,1.19111 -8.29591,3.03876 -8.81221,4.60318 -0.14583,0.44185 0.0807,0.98274 0.65011,1.55212 1.69707,1.69707 5.63092,3.23496 10.88895,4.25688 l 1.06066,0.20615 -0.88389,0.15531 c -1.89698,0.33331 -5.08622,1.10202 -6.19472,1.49312 -0.33577,0.11847 -0.39379,0.10916 -1.27696,-0.20481 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6067" + d="m 104.00292,301.29849 0,-4.46491 0.37565,0.41213 c 1.26058,1.383 2.85364,2.38503 5.54637,3.48865 0.80212,0.32875 1.39653,0.59893 1.3209,0.6004 -0.22966,0.004 -1.64718,0.59841 -2.95609,1.2386 -1.79001,0.87551 -3.0005,1.73092 -3.91118,2.76393 l -0.37565,0.4261 0,-4.4649 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6069" + d="m 104.00292,286.20941 0,-4.45445 0.7734,0.75827 c 0.85461,0.83789 2.02948,1.66147 3.23091,2.26487 0.86357,0.43371 2.45502,1.0994 3.04466,1.27357 0.43559,0.12865 0.47418,0.2162 0.13258,0.30072 -0.3889,0.0962 -2.28836,0.90937 -3.21955,1.37828 -1.40705,0.70853 -2.17903,1.25122 -3.34651,2.35256 l -0.61549,0.58063 0,-4.45445 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6071" + d="m 104.00292,316.2336 0,-4.3939 0.7734,0.7244 c 1.34707,1.26172 3.30309,2.41097 5.56846,3.27175 0.5469,0.20781 0.99437,0.40523 0.99437,0.43872 0,0.0335 -0.16904,0.1105 -0.37565,0.17113 -0.56633,0.16617 -2.0255,0.76261 -2.74379,1.12152 -1.39667,0.69789 -2.56351,1.50067 -3.59351,2.47231 l -0.62328,0.58798 0,-4.39391 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6073" + d="m 159.86435,319.89194 c -1.31096,-1.22228 -3.06266,-2.24617 -5.38968,-3.15032 -0.56016,-0.21764 -1.04794,-0.42322 -1.08396,-0.45684 -0.036,-0.0336 0.36174,-0.21554 0.88388,-0.40427 2.03146,-0.73426 4.16877,-1.96276 5.545,-3.1872 l 0.75187,-0.66894 0,4.24667 c 0,2.33567 -0.01,4.24493 -0.0221,4.24282 -0.0122,-0.002 -0.3204,-0.28198 -0.68501,-0.62192 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6075" + d="m 159.90279,304.88522 c -1.21447,-1.16855 -3.39408,-2.44074 -5.59677,-3.26672 -0.50488,-0.18932 -0.88274,-0.3653 -0.83969,-0.39106 0.043,-0.0258 0.51578,-0.21725 1.05053,-0.42553 2.14873,-0.83689 4.19012,-2.0469 5.41296,-3.20849 l 0.64164,-0.60949 0,4.25388 c 0,2.33963 -0.01,4.25261 -0.0221,4.25107 -0.0122,-0.002 -0.30311,-0.27319 -0.64657,-0.60367 l 0,1e-5 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6077" + d="m 159.95274,290.00706 c -1.46246,-1.37246 -2.90851,-2.23054 -5.40185,-3.20541 l -1.3802,-0.53963 1.24762,-0.4807 c 2.59064,-0.99815 4.06312,-1.86608 5.59152,-3.29583 l 0.56163,-0.52537 0,4.29696 c 0,2.36334 -0.01,4.29522 
-0.0221,4.29306 -0.0122,-0.002 -0.28063,-0.24654 -0.59662,-0.54308 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> @@ -1218,135 +1351,337 @@ inkscape:export-xdpi="151" inkscape:export-filename="/home/zooko/playground/allmydata/tahoe/trunk/trunk/docs/network-and-reliance-topology.png" transform="matrix(1.8356478,0,0,1.7055983,609.62935,-243.81548)" - d="M -245,306.36218 A 15,5 0 1 1 -275,306.36218 A 15,5 0 1 1 -245,306.36218 z" + d="m -245,306.36218 c 0,2.76143 -6.71573,5 -15,5 -8.28427,0 -15,-2.23857 -15,-5 0,-2.76142 6.71573,-5 15,-5 8.28427,0 15,2.23858 15,5 z" sodipodi:ry="5" sodipodi:rx="15" sodipodi:cy="306.36218" sodipodi:cx="-260" - id="path6047" - style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;marker:none;marker-start:none;marker-mid:none;marker-end:none;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + id="path6081" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" sodipodi:type="arc" /> + id="path6083" + d="m 129.50211,285.26188 c -13.87895,0.43698 -24.72388,4.11476 -24.72388,8.528 0,4.70745 12.33555,8.52799 27.53472,8.52799 15.19916,0 27.53471,-3.82054 27.53471,-8.52799 0,-4.70746 -12.33555,-8.528 -27.53471,-8.528 -0.94995,0 -1.88558,-0.0291 -2.81084,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:3.53885722;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6089" + d="m 104.51272,278.92156 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path6091" + d="m 160.20443,278.81929 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path6093" + d="m 128.84004,333.94646 c -9.55378,-0.35398 -18.0393,-2.28297 -22.28863,-5.06686 -1.79809,-1.17799 -2.86957,-2.43124 -3.28994,-3.84803 -0.0783,-0.26405 -0.14244,-0.59219 -0.14244,-0.72921 0,-0.24002 0.0162,-0.24912 0.44195,-0.24912 l 0.44194,0 0,-1.59565 0,-1.59565 0.7734,-0.78073 c 1.37242,-1.38543 3.5568,-2.61944 6.28033,-3.54794 0.57165,-0.19487 0.78387,-0.23124 0.95966,-0.1644 0.89212,0.33918 4.16622,1.14142 5.95197,1.45837 0.46183,0.082 0.89198,0.17698 0.95589,0.21113 0.0639,0.0341 -0.27417,0.13069 -0.7513,0.21454 -2.16366,0.38022 -5.27597,1.26641 -7.31986,2.08423 -1.26485,0.5061 -2.73088,1.37245 
-3.43155,2.02784 -0.61328,0.57365 -0.88935,1.07747 -0.83056,1.51574 0.10121,0.75461 1.03778,1.65011 2.65467,2.53826 3.03786,1.66869 8.27963,3.00706 14.24698,3.63764 3.43631,0.36313 4.34573,0.40478 8.83883,0.40478 4.49781,0 5.42336,-0.0425 8.83461,-0.40564 6.70706,-0.71397 12.03545,-2.18596 15.15792,-4.18741 0.9919,-0.63579 1.77268,-1.55176 1.77268,-2.07959 0,-1.83648 -5.36139,-4.39355 -11.59973,-5.53239 -0.47331,-0.0864 -0.84731,-0.17033 -0.83113,-0.18651 0.0162,-0.0162 0.72162,-0.16617 1.56765,-0.33331 2.47542,-0.48906 5.64829,-1.3148 5.64829,-1.46998 0,-0.0398 -0.27842,-0.15364 -0.61871,-0.25293 -0.3403,-0.0993 -0.8872,-0.25861 -1.21534,-0.35405 -0.92376,-0.26868 -2.84828,-0.70143 -4.29994,-0.96689 l -1.31683,-0.24079 0.65392,-0.11255 c 3.18241,-0.5477 6.84005,-1.65813 8.9323,-2.71177 2.30807,-1.16233 3.37088,-2.35285 2.99142,-3.3509 -0.23179,-0.60965 -1.60641,-1.72631 -2.97062,-2.41315 -1.9679,-0.99078 -5.64494,-2.1244 -8.55478,-2.6374 l -0.87431,-0.15414 1.31625,-0.24145 c 2.43646,-0.44695 6.04503,-1.38809 6.04503,-1.57659 0,-0.16039 -3.77217,-1.13994 -6.00007,-1.55808 l -1.27132,-0.2386 1.4039,-0.28649 c 4.97987,-1.01622 8.81978,-2.58023 10.32405,-4.20502 0.83053,-0.89707 0.84809,-1.43461 0.0749,-2.29454 -1.05912,-1.17799 -3.67536,-2.47617 -6.82959,-3.38885 -1.31483,-0.38044 -3.76303,-0.95533 -4.59619,-1.07928 -0.69387,-0.10323 -0.63712,-0.16815 0.28981,-0.33148 1.49071,-0.26268 3.64552,-0.75469 5.12759,-1.17079 l 1.39572,-0.39186 0.7409,0.2615 c 2.77417,0.97916 4.85102,2.15234 6.21247,3.50932 l 0.7292,0.7268 0,2.96706 0,2.96706 -0.62748,0.66057 c -1.067,1.12327 -2.90203,2.24749 -5.19388,3.18202 -0.5172,0.21089 -1.10939,0.43336 -1.316,0.49438 -0.20661,0.061 -0.37565,0.14124 -0.37565,0.17825 0,0.037 0.20882,0.135 0.46404,0.21774 2.52291,0.81791 5.13472,2.28602 6.40815,3.60208 l 0.64082,0.66226 0,3.01998 0,3.01998 -0.68501,0.68288 c -1.34606,1.34185 -3.86365,2.74195 -6.45235,3.58832 -0.25522,0.0834 -0.46404,0.18199 -0.46404,0.21901 0,0.037 0.16904,0.1169 0.37565,0.17754 0.70041,0.20554 2.58155,0.99039 3.38086,1.41053 1.28631,0.67613 2.3365,1.41391 3.11569,2.18881 l 0.7292,0.72518 0,1.72989 0,1.72989 0.48614,0 c 0.57517,0 0.59074,0.0469 0.30199,0.90876 -0.55901,1.66855 -2.08207,3.18021 -4.50044,4.46673 -5.78113,3.07547 -16.66459,4.76162 -28.01911,4.34094 l 0,10e-6 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6095" + d="m 111.42754,316.1251 c -3.10778,-1.11627 -5.21391,-2.29562 -6.60703,-3.69969 l -0.81759,-0.82402 0,-2.81338 0,-2.81337 0.41984,-0.50506 c 1.06123,-1.27659 2.96244,-2.48815 5.5783,-3.55478 1.61505,-0.65855 1.44972,-0.64809 3.01747,-0.19098 1.56699,0.4569 2.85288,0.76221 4.68458,1.11228 1.15542,0.22082 1.24682,0.25126 0.92808,0.3091 -6.05073,1.09801 -10.93674,3.17996 -11.85169,5.05006 -0.21536,0.44018 -0.22853,0.52104 -0.13545,0.83173 0.0568,0.1894 0.3084,0.56876 0.55967,0.84369 1.57122,1.71916 5.74995,3.388 10.98553,4.38723 0.58336,0.11134 0.96122,0.21541 0.83969,0.23127 -1.27847,0.16687 -4.13999,0.80568 -6.02435,1.34486 -1.25299,0.35852 -1.34298,0.37513 -1.57705,0.29106 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6097" + d="m 111.5536,301.25626 c -0.045,-0.0423 -0.36029,-0.17641 -0.70059,-0.29795 -2.98664,-1.06672 -5.29595,-2.45378 -6.47182,-3.88726 l -0.37827,-0.46114 0,-2.86361 0,-2.86362 0.81759,-0.81133 c 0.96413,-0.95675 1.99468,-1.6618 3.51344,-2.40369 1.12874,-0.55138 3.15058,-1.35624 3.40691,-1.35624 0.0783,0 0.68619,0.15689 1.35084,0.34865 1.87621,0.5413 5.4076,1.31736 6.08955,1.33824 0.13499,0.004 -0.15231,0.0797 -0.63845,0.16791 -5.27086,0.95653 -9.67144,2.6835 -11.3361,4.44875 -0.73345,0.77777 -0.82416,1.25546 -0.3689,1.94278 0.84622,1.27756 3.69371,2.76474 7.21735,3.76945 1.3114,0.37393 3.12178,0.80159 4.16219,0.98323 0.72873,0.12722 0.69,0.19167 -0.20487,0.34097 -1.75461,0.29273 -6.00723,1.37494 -6.27448,1.59674 -0.0584,0.0485 -0.13774,0.052 -0.18439,0.008 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6099" + d="m 110.89434,285.8727 c -2.97974,-1.05932 -5.15513,-2.36041 -6.44977,-3.85759 l -0.39746,-0.45963 -0.0442,-1.26806 -0.0442,-1.26804 -0.41984,-0.0269 -0.41985,-0.0269 0,-0.41853 c 0,-2.04404 1.89922,-4.27268 4.94131,-5.7984 7.8506,-3.93735 23.33188,-5.37648 36.60122,-3.40242 6.96201,1.03573 12.37864,2.99776 15.01528,5.43888 1.14578,1.06082 1.83041,2.37849 1.93171,3.71784 l 0.0491,0.64909 -0.5431,0 -0.54308,0 0,1.29299 0,1.293 -0.7292,0.72794 c -1.34862,1.34626 -3.49442,2.54869 -6.24118,3.49732 l -0.73899,0.25523 -1.02877,-0.30443 c -1.36151,-0.40289 -3.30364,-0.86466 -4.82947,-1.14827 -0.68059,-0.12651 -1.2971,-0.25257 -1.37002,-0.28014 -0.0729,-0.0276 0.50381,-0.18009 1.28163,-0.33894 4.3054,-0.87926 7.47825,-2.0111 9.54594,-3.40527 0.6798,-0.45836 1.41207,-1.19087 1.54806,-1.54855 0.22369,-0.58836 0.003,-1.17927 -0.6779,-1.81513 -3.16521,-2.95581 -12.32714,-5.13291 -22.61369,-5.37357 -7.26643,-0.17 -13.812,0.44413 -19.25547,1.80661 -4.75881,1.19111 -8.29591,3.03876 -8.81221,4.60318 -0.14583,0.44185 0.0807,0.98274 0.65011,1.55212 1.69707,1.69707 5.63092,3.23496 10.88895,4.25688 l 1.06066,0.20615 -0.88389,0.15531 c -1.89698,0.33331 -5.08622,1.10202 -6.19472,1.49312 -0.33577,0.11847 -0.39379,0.10916 -1.27696,-0.20481 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6101" + d="m 104.00292,301.29849 0,-4.46491 0.37565,0.41213 c 1.26058,1.383 2.85364,2.38503 5.54637,3.48865 0.80212,0.32875 1.39653,0.59893 1.3209,0.6004 -0.22966,0.004 -1.64718,0.59841 -2.95609,1.2386 -1.79001,0.87551 -3.0005,1.73092 -3.91118,2.76393 l -0.37565,0.4261 0,-4.4649 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6103" + d="m 104.00292,286.20941 0,-4.45445 0.7734,0.75827 c 0.85461,0.83789 2.02948,1.66147 3.23091,2.26487 0.86357,0.43371 2.45502,1.0994 3.04466,1.27357 0.43559,0.12865 0.47418,0.2162 0.13258,0.30072 -0.3889,0.0962 -2.28836,0.90937 -3.21955,1.37828 -1.40705,0.70853 -2.17903,1.25122 -3.34651,2.35256 l -0.61549,0.58063 0,-4.45445 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6105" + d="m 104.00292,316.2336 0,-4.3939 0.7734,0.7244 c 1.34707,1.26172 3.30309,2.41097 5.56846,3.27175 0.5469,0.20781 0.99437,0.40523 0.99437,0.43872 0,0.0335 -0.16904,0.1105 -0.37565,0.17113 -0.56633,0.16617 -2.0255,0.76261 -2.74379,1.12152 -1.39667,0.69789 -2.56351,1.50067 -3.59351,2.47231 l -0.62328,0.58798 0,-4.39391 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6107" + d="m 159.86435,319.89194 c -1.31096,-1.22228 -3.06266,-2.24617 -5.38968,-3.15032 -0.56016,-0.21764 -1.04794,-0.42322 -1.08396,-0.45684 -0.036,-0.0336 0.36174,-0.21554 0.88388,-0.40427 2.03146,-0.73426 4.16877,-1.96276 5.545,-3.1872 l 0.75187,-0.66894 0,4.24667 c 0,2.33567 -0.01,4.24493 -0.0221,4.24282 -0.0122,-0.002 -0.3204,-0.28198 -0.68501,-0.62192 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6109" + d="m 159.90279,304.88522 c -1.21447,-1.16855 -3.39408,-2.44074 -5.59677,-3.26672 -0.50488,-0.18932 -0.88274,-0.3653 -0.83969,-0.39106 0.043,-0.0258 0.51578,-0.21725 1.05053,-0.42553 2.14873,-0.83689 4.19012,-2.0469 5.41296,-3.20849 l 0.64164,-0.60949 0,4.25388 c 0,2.33963 -0.01,4.25261 -0.0221,4.25107 -0.0122,-0.002 -0.30311,-0.27319 -0.64657,-0.60367 l 0,1e-5 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6111" + d="m 159.95274,290.00706 c -1.46246,-1.37246 -2.90851,-2.23054 -5.40185,-3.20541 l -1.3802,-0.53963 1.24762,-0.4807 c 2.59064,-0.99815 4.06312,-1.86608 5.59152,-3.29583 l 0.56163,-0.52537 0,4.29696 c 0,2.36334 -0.01,4.29522 
-0.0221,4.29306 -0.0122,-0.002 -0.28063,-0.24654 -0.59662,-0.54308 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + + Tahoe-LAFS storage protocol + Tahoe-LAFS web-API + + + FTPserver + SFTPserver + + + SFTP + + + + + + + Cloudstorage + @@ -1355,153 +1690,237 @@ inkscape:export-xdpi="151" inkscape:export-filename="/home/zooko/playground/allmydata/tahoe/trunk/trunk/docs/network-and-reliance-topology.png" transform="matrix(1.8356478,0,0,1.7055983,609.62935,-243.81548)" - d="M -245,306.36218 A 15,5 0 1 1 -275,306.36218 A 15,5 0 1 1 -245,306.36218 z" + d="m -245,306.36218 c 0,2.76143 -6.71573,5 -15,5 -8.28427,0 -15,-2.23857 -15,-5 0,-2.76142 6.71573,-5 15,-5 8.28427,0 15,2.23858 15,5 z" sodipodi:ry="5" sodipodi:rx="15" sodipodi:cy="306.36218" sodipodi:cx="-260" - id="path6081" - style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;marker:none;marker-start:none;marker-mid:none;marker-end:none;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + id="path5979-0" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" sodipodi:type="arc" /> + id="path5981-5" + d="m 129.50211,285.26188 c -13.87895,0.43698 -24.72388,4.11476 -24.72388,8.528 0,4.70745 12.33555,8.52799 27.53472,8.52799 15.19916,0 27.53471,-3.82054 27.53471,-8.52799 0,-4.70746 -12.33555,-8.528 -27.53471,-8.528 -0.94995,0 -1.88558,-0.0291 -2.81084,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:3.53885722;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path5987-9" + d="m 104.51272,278.92156 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path5989-4" + d="m 160.20443,278.81929 c 0,44.98967 0,44.98967 0,44.98967" + style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" + inkscape:connector-curvature="0" /> + id="path5991-3" + d="m 128.84004,333.94646 c -9.55378,-0.35398 -18.0393,-2.28297 -22.28863,-5.06686 -1.79809,-1.17799 -2.86957,-2.43124 -3.28994,-3.84803 -0.0783,-0.26405 -0.14244,-0.59219 -0.14244,-0.72921 0,-0.24002 0.0162,-0.24912 0.44195,-0.24912 l 0.44194,0 0,-1.59565 0,-1.59565 0.7734,-0.78073 c 1.37242,-1.38543 3.5568,-2.61944 6.28033,-3.54794 0.57165,-0.19487 0.78387,-0.23124 0.95966,-0.1644 0.89212,0.33918 4.16622,1.14142 5.95197,1.45837 0.46183,0.082 0.89198,0.17698 0.95589,0.21113 
0.0639,0.0341 -0.27417,0.13069 -0.7513,0.21454 -2.16366,0.38022 -5.27597,1.26641 -7.31986,2.08423 -1.26485,0.5061 -2.73088,1.37245 -3.43155,2.02784 -0.61328,0.57365 -0.88935,1.07747 -0.83056,1.51574 0.10121,0.75461 1.03778,1.65011 2.65467,2.53826 3.03786,1.66869 8.27963,3.00706 14.24698,3.63764 3.43631,0.36313 4.34573,0.40478 8.83883,0.40478 4.49781,0 5.42336,-0.0425 8.83461,-0.40564 6.70706,-0.71397 12.03545,-2.18596 15.15792,-4.18741 0.9919,-0.63579 1.77268,-1.55176 1.77268,-2.07959 0,-1.83648 -5.36139,-4.39355 -11.59973,-5.53239 -0.47331,-0.0864 -0.84731,-0.17033 -0.83113,-0.18651 0.0162,-0.0162 0.72162,-0.16617 1.56765,-0.33331 2.47542,-0.48906 5.64829,-1.3148 5.64829,-1.46998 0,-0.0398 -0.27842,-0.15364 -0.61871,-0.25293 -0.3403,-0.0993 -0.8872,-0.25861 -1.21534,-0.35405 -0.92376,-0.26868 -2.84828,-0.70143 -4.29994,-0.96689 l -1.31683,-0.24079 0.65392,-0.11255 c 3.18241,-0.5477 6.84005,-1.65813 8.9323,-2.71177 2.30807,-1.16233 3.37088,-2.35285 2.99142,-3.3509 -0.23179,-0.60965 -1.60641,-1.72631 -2.97062,-2.41315 -1.9679,-0.99078 -5.64494,-2.1244 -8.55478,-2.6374 l -0.87431,-0.15414 1.31625,-0.24145 c 2.43646,-0.44695 6.04503,-1.38809 6.04503,-1.57659 0,-0.16039 -3.77217,-1.13994 -6.00007,-1.55808 l -1.27132,-0.2386 1.4039,-0.28649 c 4.97987,-1.01622 8.81978,-2.58023 10.32405,-4.20502 0.83053,-0.89707 0.84809,-1.43461 0.0749,-2.29454 -1.05912,-1.17799 -3.67536,-2.47617 -6.82959,-3.38885 -1.31483,-0.38044 -3.76303,-0.95533 -4.59619,-1.07928 -0.69387,-0.10323 -0.63712,-0.16815 0.28981,-0.33148 1.49071,-0.26268 3.64552,-0.75469 5.12759,-1.17079 l 1.39572,-0.39186 0.7409,0.2615 c 2.77417,0.97916 4.85102,2.15234 6.21247,3.50932 l 0.7292,0.7268 0,2.96706 0,2.96706 -0.62748,0.66057 c -1.067,1.12327 -2.90203,2.24749 -5.19388,3.18202 -0.5172,0.21089 -1.10939,0.43336 -1.316,0.49438 -0.20661,0.061 -0.37565,0.14124 -0.37565,0.17825 0,0.037 0.20882,0.135 0.46404,0.21774 2.52291,0.81791 5.13472,2.28602 6.40815,3.60208 l 0.64082,0.66226 0,3.01998 0,3.01998 -0.68501,0.68288 c -1.34606,1.34185 -3.86365,2.74195 -6.45235,3.58832 -0.25522,0.0834 -0.46404,0.18199 -0.46404,0.21901 0,0.037 0.16904,0.1169 0.37565,0.17754 0.70041,0.20554 2.58155,0.99039 3.38086,1.41053 1.28631,0.67613 2.3365,1.41391 3.11569,2.18881 l 0.7292,0.72518 0,1.72989 0,1.72989 0.48614,0 c 0.57517,0 0.59074,0.0469 0.30199,0.90876 -0.55901,1.66855 -2.08207,3.18021 -4.50044,4.46673 -5.78113,3.07547 -16.66459,4.76162 -28.01911,4.34094 l 0,10e-6 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path5993-5" + d="m 111.42754,316.1251 c -3.10778,-1.11627 -5.21391,-2.29562 -6.60703,-3.69969 l -0.81759,-0.82402 0,-2.81338 0,-2.81337 0.41984,-0.50506 c 1.06123,-1.27659 2.96244,-2.48815 5.5783,-3.55478 1.61505,-0.65855 1.44972,-0.64809 3.01747,-0.19098 1.56699,0.4569 2.85288,0.76221 4.68458,1.11228 1.15542,0.22082 1.24682,0.25126 0.92808,0.3091 -6.05073,1.09801 -10.93674,3.17996 -11.85169,5.05006 -0.21536,0.44018 -0.22853,0.52104 -0.13545,0.83173 0.0568,0.1894 0.3084,0.56876 0.55967,0.84369 1.57122,1.71916 5.74995,3.388 10.98553,4.38723 0.58336,0.11134 0.96122,0.21541 0.83969,0.23127 -1.27847,0.16687 -4.13999,0.80568 -6.02435,1.34486 -1.25299,0.35852 -1.34298,0.37513 -1.57705,0.29106 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path5995-1" + d="m 111.5536,301.25626 c -0.045,-0.0423 -0.36029,-0.17641 -0.70059,-0.29795 -2.98664,-1.06672 -5.29595,-2.45378 -6.47182,-3.88726 l -0.37827,-0.46114 0,-2.86361 0,-2.86362 0.81759,-0.81133 c 0.96413,-0.95675 1.99468,-1.6618 3.51344,-2.40369 1.12874,-0.55138 3.15058,-1.35624 3.40691,-1.35624 0.0783,0 0.68619,0.15689 1.35084,0.34865 1.87621,0.5413 5.4076,1.31736 6.08955,1.33824 0.13499,0.004 -0.15231,0.0797 -0.63845,0.16791 -5.27086,0.95653 -9.67144,2.6835 -11.3361,4.44875 -0.73345,0.77777 -0.82416,1.25546 -0.3689,1.94278 0.84622,1.27756 3.69371,2.76474 7.21735,3.76945 1.3114,0.37393 3.12178,0.80159 4.16219,0.98323 0.72873,0.12722 0.69,0.19167 -0.20487,0.34097 -1.75461,0.29273 -6.00723,1.37494 -6.27448,1.59674 -0.0584,0.0485 -0.13774,0.052 -0.18439,0.008 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path5997-7" + d="m 110.89434,285.8727 c -2.97974,-1.05932 -5.15513,-2.36041 -6.44977,-3.85759 l -0.39746,-0.45963 -0.0442,-1.26806 -0.0442,-1.26804 -0.41984,-0.0269 -0.41985,-0.0269 0,-0.41853 c 0,-2.04404 1.89922,-4.27268 4.94131,-5.7984 7.8506,-3.93735 23.33188,-5.37648 36.60122,-3.40242 6.96201,1.03573 12.37864,2.99776 15.01528,5.43888 1.14578,1.06082 1.83041,2.37849 1.93171,3.71784 l 0.0491,0.64909 -0.5431,0 -0.54308,0 0,1.29299 0,1.293 -0.7292,0.72794 c -1.34862,1.34626 -3.49442,2.54869 -6.24118,3.49732 l -0.73899,0.25523 -1.02877,-0.30443 c -1.36151,-0.40289 -3.30364,-0.86466 -4.82947,-1.14827 -0.68059,-0.12651 -1.2971,-0.25257 -1.37002,-0.28014 -0.0729,-0.0276 0.50381,-0.18009 1.28163,-0.33894 4.3054,-0.87926 7.47825,-2.0111 9.54594,-3.40527 0.6798,-0.45836 1.41207,-1.19087 1.54806,-1.54855 0.22369,-0.58836 0.003,-1.17927 -0.6779,-1.81513 -3.16521,-2.95581 -12.32714,-5.13291 -22.61369,-5.37357 -7.26643,-0.17 -13.812,0.44413 -19.25547,1.80661 -4.75881,1.19111 -8.29591,3.03876 -8.81221,4.60318 -0.14583,0.44185 0.0807,0.98274 0.65011,1.55212 1.69707,1.69707 5.63092,3.23496 10.88895,4.25688 l 1.06066,0.20615 -0.88389,0.15531 c -1.89698,0.33331 -5.08622,1.10202 -6.19472,1.49312 -0.33577,0.11847 -0.39379,0.10916 -1.27696,-0.20481 l 0,0 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path5999-4" + d="m 104.00292,301.29849 0,-4.46491 0.37565,0.41213 c 1.26058,1.383 2.85364,2.38503 5.54637,3.48865 0.80212,0.32875 1.39653,0.59893 1.3209,0.6004 -0.22966,0.004 -1.64718,0.59841 -2.95609,1.2386 -1.79001,0.87551 -3.0005,1.73092 -3.91118,2.76393 l -0.37565,0.4261 0,-4.4649 z" + 
style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6001-3" + d="m 104.00292,286.20941 0,-4.45445 0.7734,0.75827 c 0.85461,0.83789 2.02948,1.66147 3.23091,2.26487 0.86357,0.43371 2.45502,1.0994 3.04466,1.27357 0.43559,0.12865 0.47418,0.2162 0.13258,0.30072 -0.3889,0.0962 -2.28836,0.90937 -3.21955,1.37828 -1.40705,0.70853 -2.17903,1.25122 -3.34651,2.35256 l -0.61549,0.58063 0,-4.45445 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6003-1" + d="m 104.00292,316.2336 0,-4.3939 0.7734,0.7244 c 1.34707,1.26172 3.30309,2.41097 5.56846,3.27175 0.5469,0.20781 0.99437,0.40523 0.99437,0.43872 0,0.0335 -0.16904,0.1105 -0.37565,0.17113 -0.56633,0.16617 -2.0255,0.76261 -2.74379,1.12152 -1.39667,0.69789 -2.56351,1.50067 -3.59351,2.47231 l -0.62328,0.58798 0,-4.39391 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6005-4" + d="m 159.86435,319.89194 c -1.31096,-1.22228 -3.06266,-2.24617 -5.38968,-3.15032 -0.56016,-0.21764 -1.04794,-0.42322 -1.08396,-0.45684 -0.036,-0.0336 0.36174,-0.21554 0.88388,-0.40427 2.03146,-0.73426 4.16877,-1.96276 5.545,-3.1872 l 0.75187,-0.66894 0,4.24667 c 0,2.33567 -0.01,4.24493 -0.0221,4.24282 -0.0122,-0.002 -0.3204,-0.28198 -0.68501,-0.62192 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6007-6" + d="m 159.90279,304.88522 c -1.21447,-1.16855 -3.39408,-2.44074 -5.59677,-3.26672 -0.50488,-0.18932 -0.88274,-0.3653 -0.83969,-0.39106 0.043,-0.0258 0.51578,-0.21725 1.05053,-0.42553 2.14873,-0.83689 4.19012,-2.0469 5.41296,-3.20849 l 0.64164,-0.60949 0,4.25388 c 0,2.33963 -0.01,4.25261 -0.0221,4.25107 -0.0122,-0.002 -0.30311,-0.27319 -0.64657,-0.60367 l 0,1e-5 z" + style="opacity:0.75702485;fill:#000000;fill-opacity:0.87815121;fill-rule:nonzero;stroke:#000000;stroke-width:0.17677669;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" + inkscape:connector-curvature="0" /> + id="path6009-9" + d="m 159.95274,290.00706 c -1.46246,-1.37246 -2.90851,-2.23054 -5.40185,-3.20541 l -1.3802,-0.53963 1.24762,-0.4807 c 2.59064,-0.99815 4.06312,-1.86608 5.59152,-3.29583 l 0.56163,-0.52537 0,4.29696 c 0,2.36334 
diff -Nru tahoe-lafs-1.9.2/docs/nodekeys.rst tahoe-lafs-1.10.0/docs/nodekeys.rst
--- tahoe-lafs-1.9.2/docs/nodekeys.rst 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/docs/nodekeys.rst 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,133 @@
+=======================
+Node Keys in Tahoe-LAFS
+=======================
+
+"Node Keys" are cryptographic signing/verifying keypairs used to
+identify Tahoe-LAFS nodes (client-only and client+server). The private
+signing key is stored in NODEDIR/private/node.privkey, and is used to
+sign the announcements that are distributed to all nodes by the
+Introducer. The public verifying key is used to identify the sending
+node to those other systems: it is displayed as a "Node ID" that looks
+like "v0-abc234xyz567..", i.e. a "v0-" prefix followed by a long
+base32-encoded string.
+
+These node keys were introduced in the 1.10 release (May 2013), as
+part of ticket #466. In previous releases, announcements were unsigned,
+and nodes were identified by their Foolscap "Tub ID" (a somewhat shorter
+base32 string, with no "v0-" prefix).
+
+Why Announcements Are Signed
+----------------------------
+
+All nodes (both client-only and client+server) publish announcements to
+the Introducer, which then relays them to all other nodes. These
+announcements contain information about the publishing node's nickname,
+how to reach the node, what services it offers, and what version of code
+it is running.
+
+The new private node key is used to sign these announcements, preventing
+the Introducer from modifying their contents en route. This will enable
+future versions of Tahoe-LAFS to use other forms of introduction
+(gossip, multiple introducers) without weakening the security model.
+
+The Node ID is useful as a handle with which to talk about a node. For
+example, when clients eventually gain the ability to control which
+storage servers they are willing to use (#467), the configuration file
+might simply include a list of Node IDs for the approved servers.
+
+TubIDs are currently also suitable for this job, but they depend upon
+having a Foolscap connection to the server. Since our goal is to move
+away from Foolscap towards a simpler (faster and more portable)
+protocol, we want to reduce our dependence upon TubIDs. Node IDs and
+Ed25519 signatures can be used for non-Foolscap, non-SSL-based protocols.
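+
+For illustration, here is a minimal sketch of both the announcement
+signing described above and the Node ID derivation described in the
+next section. PyNaCl is used purely as a convenient Ed25519
+implementation for this sketch (it is not a Tahoe-LAFS dependency),
+and the announcement contents shown are hypothetical::
+
+  import base64, json
+  from nacl.signing import SigningKey  # pip install pynacl
+
+  # Generate the node's signing keypair (a real node persists this in
+  # NODEDIR/private/node.privkey).
+  sk = SigningKey.generate()
+  vk_bytes = sk.verify_key.encode()  # 32-byte public verifying key
+
+  # Long-form Node ID: lowercase base32 of the verifying key, with the
+  # trailing "=" padding removed and a "v0-" prefix prepended.
+  node_id = "v0-" + base64.b32encode(vk_bytes).decode().lower().rstrip("=")
+  short_form = "[%s]" % node_id[3:11]  # first 8 base32 characters
+
+  # Sign a (hypothetical) announcement. Receivers verify it against the
+  # public key, so a relaying Introducer cannot alter it en route.
+  ann = json.dumps({"nickname": "example", "services": ["storage"]})
+  signed = sk.sign(ann.encode())
+  sk.verify_key.verify(signed)  # raises BadSignatureError if tampered with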
+
+How The Node ID Is Computed
+---------------------------
+
+The long-form Node ID is the Ed25519 public verifying key, 256 bits (32
+bytes) long, base32-encoded, with a "v0-" prefix prepended and the
+trailing "=" padding removed, like so::
+
+  v0-rlj3jnxqv4ee5rtpyngvzbhmhuikjfenjve7j5mzmfcxytwmyf6q
+
+The Node ID is displayed in this long form on the node's front Welcome
+page, and on the Introducer's status page. In most other places
+(share-placement lists, file health displays), the "short form" is used
+instead. This is simply the first 8 characters of the base32 portion,
+frequently enclosed in square brackets, like this::
+
+  [rlj3jnxq]
+
+In contrast, old-style TubIDs are usually displayed with just 6 base32
+characters.
+
+Version Compatibility, Fallbacks For Old Versions
+-------------------------------------------------
+
+Since Tahoe-LAFS 1.9 does not know about signed announcements, 1.10
+includes backwards-compatibility code to allow old and new versions to
+interoperate. There are three relevant participants: the node publishing
+an announcement, the Introducer which relays it, and the node receiving
+the (possibly signed) announcement.
+
+When a 1.10 node connects to an old Introducer (version 1.9 or earlier),
+it sends downgraded non-signed announcements. It likewise accepts
+non-signed announcements from the Introducer. The non-signed
+announcements use TubIDs to identify the sending node. The new 1.10
+Introducer, when it connects to an old node, downgrades any signed
+announcements to non-signed ones before delivery.
+
+As a result, the only way to receive signed announcements is for all
+three systems to be running the new 1.10 code. In a grid with a mixture
+of old and new nodes, if the Introducer is old, then all nodes will see
+unsigned TubIDs. If the Introducer is new, then nodes will see signed
+Node IDs whenever possible.
+
+Share Placement
+---------------
+
+Tahoe-LAFS uses a "permuted ring" algorithm to decide where to place
+shares for any given file. For each potential server, it uses that
+server's "permutation seed" to compute a pseudo-random but deterministic
+location on a ring, then walks the ring in clockwise order, asking each
+server in turn to hold a share until all are placed. When downloading a
+file, the servers are accessed in the same order. This minimizes the
+number of queries that must be done to download a file, and tolerates
+"churn" (nodes being added and removed from the grid) fairly well.
+
+This property depends upon server nodes having a stable permutation
+seed. If a server's permutation seed were to change, it would
+effectively wind up at a randomly selected place on the permuted ring.
+Downloads would still complete, but clients would spend more time asking
+other servers before querying the correct one.
+
+In the old 1.9 code, the permutation seed was always equal to the TubID.
+In 1.10, servers include their permutation seed as part of their
+announcement. To improve stability for existing grids, if an old server
+(one with existing shares) is upgraded to run the 1.10 codebase, it will
+use its old TubID as its permutation seed. When a new empty server runs
+the 1.10 code, it will use its Node ID instead. In both cases, once the
+node has picked a permutation seed, it will continue using that value
+forever.
+
+To be specific, when a node wakes up running the 1.10 code, it will look
+for a recorded NODEDIR/permutation-seed file, and use its contents if
+present. If that file does not exist, it creates it (with the TubID if
+it has any shares, otherwise with the Node ID), and uses the contents as
+the permutation-seed.
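That startup rule is small enough to sketch directly (the helper and argument
names here are hypothetical; the real logic lives in Tahoe's node-startup
code)::

    import os

    def choose_permutation_seed(nodedir, tubid, nodeid, has_shares):
        # hypothetical helper, mirroring the rule described above
        seed_path = os.path.join(nodedir, "permutation-seed")
        if os.path.exists(seed_path):
            # reuse a previously recorded seed, forever
            return open(seed_path).read().strip()
        # upgraded servers with existing shares keep their old TubID;
        # new empty servers use their Node ID
        seed = tubid if has_shares else nodeid
        f = open(seed_path, "w")
        f.write(seed + "\n")
        f.close()
        return seed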
+
+There is one unfortunate consequence of this pattern. If a new 1.10
+server is created in a grid that has an old client, or has a new client
+but an old Introducer, then that client will see downgraded non-signed
+announcements, and thus will first upload shares with the TubID-based
+permutation-seed. Later, when the client and/or Introducer is upgraded,
+the client will start seeing signed announcements with the NodeID-based
+permutation-seed, and will then look for shares in the wrong place. This
+will hurt performance in a large grid, but should not affect
+reliability. This effect shouldn't even be noticeable in grids for which
+the number of servers is close to the "N" shares.total number (e.g.
+where num-servers < 3*N). And the as-yet-unimplemented "share
+rebalancing" feature should repair the misplacement.
+
+If you wish to avoid this effect, try to upgrade both Introducers and
+clients at about the same time. (Upgrading servers does not matter: they
+will continue to use the old permutation-seed.)
diff -Nru tahoe-lafs-1.9.2/docs/performance.rst.orig tahoe-lafs-1.10.0/docs/performance.rst.orig
--- tahoe-lafs-1.9.2/docs/performance.rst.orig	2012-05-14 02:24:31.000000000 +0000
+++ tahoe-lafs-1.10.0/docs/performance.rst.orig	1970-01-01 00:00:00.000000000 +0000
@@ -1,229 +0,0 @@
-============================================
-Performance costs for some common operations
-============================================
-
-1. `Publishing an A-byte immutable file`_
-2. `Publishing an A-byte mutable file`_
-3. `Downloading B bytes of an A-byte immutable file`_
-4. `Downloading B bytes of an A-byte mutable file`_
-5. `Modifying B bytes of an A-byte mutable file`_
-6. `Inserting/Removing B bytes in an A-byte mutable file`_
-7. `Adding an entry to an A-entry directory`_
-8. `Listing an A entry directory`_
-9. `Performing a file-check on an A-byte file`_
-10. `Performing a file-verify on an A-byte file`_
-11. `Repairing an A-byte file (mutable or immutable)`_
-
-``K`` indicates the number of shares required to reconstruct the file
-(default: 3)
-
-``N`` indicates the total number of shares produced (default: 10)
-
-``S`` indicates the segment size (default: 128 KiB)
-
-``A`` indicates the number of bytes in a file
-
-``B`` indicates the number of bytes of a file which are being read or
-written
-
-``G`` indicates the number of storage servers on your grid
-
-Most of these cost estimates may have a further constant multiplier: when a
-formula says ``N/K*S``, the cost may actually be ``2*N/K*S`` or ``3*N/K*S``.
-Also note that all references to mutable files are for SDMF-formatted files;
-this document has not yet been updated to describe the MDMF format.
-
-Publishing an ``A``-byte immutable file
-=======================================
-
-when the file is already uploaded
----------------------------------
-
-If the file is already uploaded with the exact same contents, same
-erasure coding parameters (K, N), and same added convergence secret,
-then it reads the whole file from disk one time while hashing it to
-compute the storage index, then contacts about N servers to ask each
-one to store a share. All of the servers reply that they already have
-a copy of that share, and the upload is done.
- -disk: A - -cpu: ~A - -network: ~N - -memory footprint: S - -when the file is not already uploaded -------------------------------------- - -If the file is not already uploaded with the exact same contents, same -erasure coding parameters (K, N), and same added convergence secret, -then it reads the whole file from disk one time while hashing it to -compute the storage index, then contacts about N servers to ask each -one to store a share. Then it uploads each share to a storage server. - -disk: 2*A - -cpu: 2*~A - -network: N/K*A - -memory footprint: N/K*S - -Publishing an ``A``-byte mutable file -===================================== - -cpu: ~A + a large constant for RSA keypair generation - -network: A - -memory footprint: N/K*A - -notes: Tahoe-LAFS generates a new RSA keypair for each mutable file that it -publishes to a grid. This takes up to 1 or 2 seconds on a typical desktop PC. - -Part of the process of encrypting, encoding, and uploading a mutable file to a -Tahoe-LAFS grid requires that the entire file be in memory at once. For larger -files, this may cause Tahoe-LAFS to have an unacceptably large memory footprint -(at least when uploading a mutable file). - -Downloading ``B`` bytes of an ``A``-byte immutable file -======================================================= - -cpu: ~B - -network: B - -notes: When Tahoe-LAFS 1.8.0 or later is asked to read an arbitrary -range of an immutable file, only the S-byte segments that overlap the -requested range will be downloaded. - -(Earlier versions would download from the beginning of the file up -until the end of the requested range, and then continue to download -the rest of the file even after the request was satisfied.) - -Downloading ``B`` bytes of an ``A``-byte mutable file -===================================================== - -cpu: ~A - -network: A - -memory footprint: A - -notes: As currently implemented, mutable files must be downloaded in -their entirety before any part of them can be read. We are -exploring fixes for this; see ticket #393 for more information. - -Modifying ``B`` bytes of an ``A``-byte mutable file -=================================================== - -cpu: ~A - -network: A - -memory footprint: N/K*A - -notes: If you upload a changed version of a mutable file that you -earlier put onto your grid with, say, 'tahoe put --mutable', -Tahoe-LAFS will replace the old file with the new file on the -grid, rather than attempting to modify only those portions of the -file that have changed. Modifying a file in this manner is -essentially uploading the file over again, except that it re-uses -the existing RSA keypair instead of generating a new one. - -Inserting/Removing ``B`` bytes in an ``A``-byte mutable file -============================================================ - -cpu: ~A - -network: A - -memory footprint: N/K*A - -notes: Modifying any part of a mutable file in Tahoe-LAFS requires that -the entire file be downloaded, modified, held in memory while it is -encrypted and encoded, and then re-uploaded. A future version of the -mutable file layout ("LDMF") may provide efficient inserts and -deletes. Note that this sort of modification is mostly used internally -for directories, and isn't something that the WUI, CLI, or other -interfaces will do -- instead, they will simply overwrite the file to -be modified, as described in "Modifying B bytes of an A-byte mutable -file". 
- -Adding an entry to an ``A``-entry directory -=========================================== - -cpu: ~A - -network: ~A - -memory footprint: N/K*~A - -notes: In Tahoe-LAFS, directories are implemented as specialized mutable -files. So adding an entry to a directory is essentially adding B -(actually, 300-330) bytes somewhere in an existing mutable file. - -Listing an ``A`` entry directory -================================ - -cpu: ~A - -network: ~A - -memory footprint: N/K*~A - -notes: Listing a directory requires that the mutable file storing the -directory be downloaded from the grid. So listing an A entry -directory requires downloading a (roughly) 330 * A byte mutable -file, since each directory entry is about 300-330 bytes in size. - -Performing a file-check on an ``A``-byte file -============================================= - -cpu: ~G - -network: ~G - -memory footprint: negligible - -notes: To check a file, Tahoe-LAFS queries all the servers that it knows -about. Note that neither of these values directly depend on the size -of the file. This is relatively inexpensive, compared to the verify -and repair operations. - -Performing a file-verify on an ``A``-byte file -============================================== - -cpu: ~N/K*A - -network: N/K*A - -memory footprint: N/K*S - -notes: To verify a file, Tahoe-LAFS downloads all of the ciphertext -shares that were originally uploaded to the grid and integrity checks -them. This is (for well-behaved grids) more expensive than downloading -an A-byte file, since only a fraction of these shares are necessary to -recover the file. - -Repairing an ``A``-byte file (mutable or immutable) -=================================================== - -cpu: variable, between ~A and ~N/K*A - -network: variable; between A and N/K*A - -memory footprint (immutable): (1+N/K)*S - (SDMF mutable): (1+N/K)*A - -notes: To repair a file, Tahoe-LAFS downloads the file, and -generates/uploads missing shares in the same way as when it initially -uploads the file. So, depending on how many shares are missing, this -can cost as little as a download or as much as a download followed by -a full upload. - -Since SDMF files have only one segment, which must be processed in its -entirety, repair requires a full-file download followed by a full-file -upload. diff -Nru tahoe-lafs-1.9.2/docs/performance.rst.rej tahoe-lafs-1.10.0/docs/performance.rst.rej --- tahoe-lafs-1.9.2/docs/performance.rst.rej 2012-06-23 22:35:49.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/performance.rst.rej 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ ---- docs/performance.rst 2012-06-17 19:38:52.624294554 -0300 -+++ docs/performance.rst 2012-06-17 19:38:52.990959790 -0300 -@@ -202,8 +202,24 @@ - - notes: To verify a file, Tahoe-LAFS downloads all of the ciphertext shares - that were originally uploaded to the grid and integrity checks them. This is --(for well-behaved grids) more expensive than downloading an A-byte file, --since only a fraction of these shares are necessary to recover the file. -+(for grids with good redundancy) more expensive than downloading an A-byte -+file, since only a fraction of these shares would be necessary to recover the -+file. -+ -+Verifying an A-byte file (mutable) -+================================== -+ -+cpu: ~N/K*A -+ -+network: N/K*A -+ -+memory footprint: N/K*A -+ -+notes: To verify a file, Tahoe-LAFS downloads all of the ciphertext shares -+that were originally uploaded to the grid and integrity checks them. 
This is -+(for grids with good redundancy) more expensive than downloading an A-byte -+file, since only a fraction of these shares would be necessary to recover the -+file. - - Repairing an ``A``-byte file (mutable or immutable) - =================================================== diff -Nru tahoe-lafs-1.9.2/docs/quickstart.rst tahoe-lafs-1.10.0/docs/quickstart.rst --- tahoe-lafs-1.9.2/docs/quickstart.rst 2012-06-21 23:48:39.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/quickstart.rst 2013-09-03 15:38:27.000000000 +0000 @@ -31,21 +31,21 @@ -------------- Check if you already have an adequate version of Python installed by running -``python -V``. Python v2.4 (v2.4.4 or greater), Python v2.5, Python v2.6, or -Python v2.7 will work. Python v3 does not work. On Windows, we recommend the -use of native Python, not Cygwin. If you don't have one of these versions of -Python installed, download and install `Python v2.7`_. Make sure that the -path to the installation directory has no spaces in it (e.g. on Windows, do -not install Python in the "Program Files" directory). +``python -V``. Python v2.6 (v2.6.6 or greater recommended) or Python v2.7 will +work. Python v3 does not work. On Windows, we recommend the use of native +Python v2.7, not Cygwin Python. If you don't have one of these versions of +Python installed, download and install `Python v2.7`_. Make sure that the path +to the installation directory has no spaces in it (e.g. on Windows, do not +install Python in the "Program Files" directory). -.. _Python v2.7: http://www.python.org/download/releases/2.7.2/ +.. _Python v2.7: http://www.python.org/download/releases/2.7.4/ Get Tahoe-LAFS -------------- -Download the latest stable release, `Tahoe-LAFS v1.9.2`_. +Download the latest stable release, `Tahoe-LAFS v1.10.0`_. -.. _Tahoe-LAFS v1.9.2: https://tahoe-lafs.org/source/tahoe-lafs/releases/allmydata-tahoe-1.9.2.zip +.. _Tahoe-LAFS v1.10.0: https://tahoe-lafs.org/source/tahoe-lafs/releases/allmydata-tahoe-1.10.0.zip Set Up Tahoe-LAFS ----------------- @@ -60,13 +60,13 @@ (or, on XP and earlier, to log out and back in again). This is needed the first time you set up Tahoe-LAFS on a particular installation of Windows. -Optionally run ``python setup.py test`` to verify that it passes all of its -self-tests. - Run ``bin/tahoe --version`` (on Windows, ``bin\tahoe --version``) to verify that the executable tool prints out the right version number after "``allmydata-tahoe:``". +Optionally run ``python setup.py trial`` to verify that it passes all of its +self-tests. + Run Tahoe-LAFS -------------- diff -Nru tahoe-lafs-1.9.2/docs/running.rst tahoe-lafs-1.10.0/docs/running.rst --- tahoe-lafs-1.9.2/docs/running.rst 2012-05-14 02:07:12.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/running.rst 2013-09-03 15:38:27.000000000 +0000 @@ -47,10 +47,11 @@ name of the directory is up to you), ``cd`` into it, and run "``tahoe create-introducer .``". Now run the introducer using "``tahoe start .``". After it starts, it will write a file named -``introducer.furl`` in that base directory. This file contains the URL -the other nodes must use in order to connect to this introducer. (Note -that "``tahoe run .``" doesn't work for introducers, this is a known -issue: `#937 `_.) +``introducer.furl`` into the ``private/`` subdirectory of that base +directory. This file contains the URL the other nodes must use in order +to connect to this introducer. (Note that "``tahoe run .``" doesn't +work for introducers, this is a known issue: `#937 +`_.) 
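For example, the whole introducer-setup sequence described above, using the
new ``private/introducer.furl`` location, is just::

    mkdir introducer
    cd introducer
    tahoe create-introducer .
    tahoe start .
    cat private/introducer.furl    # hand this FURL to the other nodes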
The "``tahoe run``" command above will run the node in the foreground. On Unix, you can run it in the background instead by using the diff -Nru tahoe-lafs-1.9.2/docs/specifications/backends/raic.rst tahoe-lafs-1.10.0/docs/specifications/backends/raic.rst --- tahoe-lafs-1.9.2/docs/specifications/backends/raic.rst 2012-06-11 03:36:46.000000000 +0000 +++ tahoe-lafs-1.10.0/docs/specifications/backends/raic.rst 2013-09-03 15:38:27.000000000 +0000 @@ -175,8 +175,8 @@ disk, memory, cloud storage, and API usage. -Network usage—bandwidth and number-of-round-trips -------------------------------------------------- +Network usage: bandwidth and number-of-round-trips +-------------------------------------------------- When a Tahoe-LAFS storage client allocates a new share on a storage server, the backend will request a list of the existing cloud objects with the @@ -324,14 +324,15 @@ ============ This design worsens a known “write hole” issue in Tahoe-LAFS when updating -the contents of mutable files. An update to a mutable file can require changing -the contents of multiple chunks, and if the client fails or is disconnected -during the operation the resulting state of the stored cloud objects may be -inconsistent—no longer containing all of the old version, but not yet containing -all of the new version. A mutable share can be left in an inconsistent state -even by the existing Tahoe-LAFS disk backend if it fails during a write, but -that has a smaller chance of occurrence because the current client behavior -leads to mutable shares being written to disk in a single system call. +the contents of mutable files. An update to a mutable file can require +changing the contents of multiple chunks, and if the client fails or is +disconnected during the operation the resulting state of the stored cloud +objects may be inconsistent: no longer containing all of the old version, but +not yet containing all of the new version. A mutable share can be left in an +inconsistent state even by the existing Tahoe-LAFS disk backend if it fails +during a write, but that has a smaller chance of occurrence because the +current client behavior leads to mutable shares being written to disk in a +single system call. The best fix for this issue probably requires changing the Tahoe-LAFS storage protocol, perhaps by extending it to use a two-phase or three-phase commit diff -Nru tahoe-lafs-1.9.2/misc/build_helpers/gen-package-table.py tahoe-lafs-1.10.0/misc/build_helpers/gen-package-table.py --- tahoe-lafs-1.9.2/misc/build_helpers/gen-package-table.py 2012-05-14 02:07:13.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/build_helpers/gen-package-table.py 2013-09-03 15:38:27.000000000 +0000 @@ -66,6 +66,9 @@ width = 100 / (len(platform_independent_pkgs) + 1) +greybgstyle = '; background-color: #E0E0E0' +nobgstyle = '' + print '' print '' print '' @@ -73,9 +76,12 @@ print ' Software packages that Tahoe-LAFS depends on' print '' print '' +print '

<h2>What is this?</h2>'
+print '<p>See quickstart.rst, wiki:Installation, and wiki:CompileError.'
 print '<h1>Software packages that Tahoe-LAFS depends on</h1>'
 print
 for pyver in reversed(sorted(python_versions)):
+    greybackground = False
     if pyver:
         print '<p>Packages for Python %s that have compiled C/C++ code:</p>
' % (pyver,) print '' @@ -87,9 +93,16 @@ first = True for platform in sorted(matrix[pyver]): + if greybackground: + bgstyle = greybgstyle + else: + bgstyle = nobgstyle + greybackground = not greybackground row_files = sorted(matrix[pyver][platform]) - style1 = first and 'border-top: 2px solid #000000; background-color: #FFFFF0' or 'background-color: #FFFFF0' + style1 = first and 'border-top: 2px solid #000000' or '' + style1 += bgstyle style2 = first and 'border-top: 2px solid #000000' or '' + style2 += bgstyle print ' ' print ' ' % (style1, platform,) for pkg in sorted(platform_dependent_pkgs): diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/provisioning.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/provisioning.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/provisioning.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/provisioning.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,776 @@ + +from nevow import inevow, rend, loaders, tags as T +import math +import util + +# factorial and binomial copied from +# http://mail.python.org/pipermail/python-list/2007-April/435718.html + +def div_ceil(n, d): + """ + The smallest integer k such that k*d >= n. + """ + return (n/d) + (n%d != 0) + +def factorial(n): + """factorial(n): return the factorial of the integer n. + factorial(0) = 1 + factorial(n) with n<0 is -factorial(abs(n)) + """ + result = 1 + for i in xrange(1, abs(n)+1): + result *= i + assert n >= 0 + return result + +def binomial(n, k): + assert 0 <= k <= n + if k == 0 or k == n: + return 1 + # calculate n!/k! as one product, avoiding factors that + # just get canceled + P = k+1 + for i in xrange(k+2, n+1): + P *= i + # if you are paranoid: + # C, rem = divmod(P, factorial(n-k)) + # assert rem == 0 + # return C + return P//factorial(n-k) + +class ProvisioningTool(rend.Page): + addSlash = True + docFactory = loaders.xmlfile(util.sibling("provisioning.xhtml")) + + def render_forms(self, ctx, data): + req = inevow.IRequest(ctx) + + def getarg(name, astype=int): + if req.method != "POST": + return None + if name in req.fields: + return astype(req.fields[name].value) + return None + return self.do_forms(getarg) + + + def do_forms(self, getarg): + filled = getarg("filled", bool) + + def get_and_set(name, options, default=None, astype=int): + current_value = getarg(name, astype) + i_select = T.select(name=name) + for (count, description) in options: + count = astype(count) + if ((current_value is not None and count == current_value) or + (current_value is None and count == default)): + o = T.option(value=str(count), selected="true")[description] + else: + o = T.option(value=str(count))[description] + i_select = i_select[o] + if current_value is None: + current_value = default + return current_value, i_select + + sections = {} + def add_input(section, text, entry): + if section not in sections: + sections[section] = [] + sections[section].extend([T.div[text, ": ", entry], "\n"]) + + def add_output(section, entry): + if section not in sections: + sections[section] = [] + sections[section].extend([entry, "\n"]) + + def build_section(section): + return T.fieldset[T.legend[section], sections[section]] + + def number(value, suffix=""): + scaling = 1 + if value < 1: + fmt = "%1.2g%s" + elif value < 100: + fmt = "%.1f%s" + elif value < 1000: + fmt = "%d%s" + elif value < 1e6: + fmt = "%.2fk%s"; scaling = 1e3 + elif value < 1e9: + fmt = "%.2fM%s"; scaling = 1e6 + elif value < 1e12: + fmt = "%.2fG%s"; scaling = 
1e9 + elif value < 1e15: + fmt = "%.2fT%s"; scaling = 1e12 + elif value < 1e18: + fmt = "%.2fP%s"; scaling = 1e15 + else: + fmt = "huge! %g%s" + return fmt % (value / scaling, suffix) + + user_counts = [(5, "5 users"), + (50, "50 users"), + (200, "200 users"), + (1000, "1k users"), + (10000, "10k users"), + (50000, "50k users"), + (100000, "100k users"), + (500000, "500k users"), + (1000000, "1M users"), + ] + num_users, i_num_users = get_and_set("num_users", user_counts, 50000) + add_input("Users", + "How many users are on this network?", i_num_users) + + files_per_user_counts = [(100, "100 files"), + (1000, "1k files"), + (10000, "10k files"), + (100000, "100k files"), + (1e6, "1M files"), + ] + files_per_user, i_files_per_user = get_and_set("files_per_user", + files_per_user_counts, + 1000) + add_input("Users", + "How many files for each user? (avg)", + i_files_per_user) + + space_per_user_sizes = [(1e6, "1MB"), + (10e6, "10MB"), + (100e6, "100MB"), + (200e6, "200MB"), + (1e9, "1GB"), + (2e9, "2GB"), + (5e9, "5GB"), + (10e9, "10GB"), + (100e9, "100GB"), + (1e12, "1TB"), + (2e12, "2TB"), + (5e12, "5TB"), + ] + # Estimate ~5gb per user as a more realistic case + space_per_user, i_space_per_user = get_and_set("space_per_user", + space_per_user_sizes, + 5e9) + add_input("Users", + "How much data for each user? (avg)", + i_space_per_user) + + sharing_ratios = [(1.0, "1.0x"), + (1.1, "1.1x"), + (2.0, "2.0x"), + ] + sharing_ratio, i_sharing_ratio = get_and_set("sharing_ratio", + sharing_ratios, 1.0, + float) + add_input("Users", + "What is the sharing ratio? (1.0x is no-sharing and" + " no convergence)", i_sharing_ratio) + + # Encoding parameters + encoding_choices = [("3-of-10-5", "3.3x (3-of-10, repair below 5)"), + ("3-of-10-8", "3.3x (3-of-10, repair below 8)"), + ("5-of-10-7", "2x (5-of-10, repair below 7)"), + ("8-of-10-9", "1.25x (8-of-10, repair below 9)"), + ("27-of-30-28", "1.1x (27-of-30, repair below 28"), + ("25-of-100-50", "4x (25-of-100, repair below 50)"), + ] + encoding_parameters, i_encoding_parameters = \ + get_and_set("encoding_parameters", + encoding_choices, "3-of-10-5", str) + encoding_pieces = encoding_parameters.split("-") + k = int(encoding_pieces[0]) + assert encoding_pieces[1] == "of" + n = int(encoding_pieces[2]) + # we repair the file when the number of available shares drops below + # this value + repair_threshold = int(encoding_pieces[3]) + + add_input("Servers", + "What are the default encoding parameters?", + i_encoding_parameters) + + # Server info + num_server_choices = [ (5, "5 servers"), + (10, "10 servers"), + (15, "15 servers"), + (30, "30 servers"), + (50, "50 servers"), + (100, "100 servers"), + (200, "200 servers"), + (300, "300 servers"), + (500, "500 servers"), + (1000, "1k servers"), + (2000, "2k servers"), + (5000, "5k servers"), + (10e3, "10k servers"), + (100e3, "100k servers"), + (1e6, "1M servers"), + ] + num_servers, i_num_servers = \ + get_and_set("num_servers", num_server_choices, 30, int) + add_input("Servers", + "How many servers are there?", i_num_servers) + + # availability is measured in dBA = -dBF, where 0dBF is 100% failure, + # 10dBF is 10% failure, 20dBF is 1% failure, etc + server_dBA_choices = [ (10, "90% [10dBA] (2.4hr/day)"), + (13, "95% [13dBA] (1.2hr/day)"), + (20, "99% [20dBA] (14min/day or 3.5days/year)"), + (23, "99.5% [23dBA] (7min/day or 1.75days/year)"), + (30, "99.9% [30dBA] (87sec/day or 9hours/year)"), + (40, "99.99% [40dBA] (60sec/week or 53min/year)"), + (50, "99.999% [50dBA] (5min per year)"), + ] + server_dBA, 
i_server_availability = \ + get_and_set("server_availability", + server_dBA_choices, + 20, int) + add_input("Servers", + "What is the server availability?", i_server_availability) + + drive_MTBF_choices = [ (40, "40,000 Hours"), + ] + drive_MTBF, i_drive_MTBF = \ + get_and_set("drive_MTBF", drive_MTBF_choices, 40, int) + add_input("Drives", + "What is the hard drive MTBF?", i_drive_MTBF) + # http://www.tgdaily.com/content/view/30990/113/ + # http://labs.google.com/papers/disk_failures.pdf + # google sees: + # 1.7% of the drives they replaced were 0-1 years old + # 8% of the drives they repalced were 1-2 years old + # 8.6% were 2-3 years old + # 6% were 3-4 years old, about 8% were 4-5 years old + + drive_size_choices = [ (100, "100 GB"), + (250, "250 GB"), + (500, "500 GB"), + (750, "750 GB"), + (1000, "1000 GB"), + (2000, "2000 GB"), + (3000, "3000 GB"), + ] + drive_size, i_drive_size = \ + get_and_set("drive_size", drive_size_choices, 3000, int) + drive_size = drive_size * 1e9 + add_input("Drives", + "What is the capacity of each hard drive?", i_drive_size) + drive_failure_model_choices = [ ("E", "Exponential"), + ("U", "Uniform"), + ] + drive_failure_model, i_drive_failure_model = \ + get_and_set("drive_failure_model", + drive_failure_model_choices, + "E", str) + add_input("Drives", + "How should we model drive failures?", i_drive_failure_model) + + # drive_failure_rate is in failures per second + if drive_failure_model == "E": + drive_failure_rate = 1.0 / (drive_MTBF * 1000 * 3600) + else: + drive_failure_rate = 0.5 / (drive_MTBF * 1000 * 3600) + + # deletion/gc/ownership mode + ownership_choices = [ ("A", "no deletion, no gc, no owners"), + ("B", "deletion, no gc, no owners"), + ("C", "deletion, share timers, no owners"), + ("D", "deletion, no gc, yes owners"), + ("E", "deletion, owner timers"), + ] + ownership_mode, i_ownership_mode = \ + get_and_set("ownership_mode", ownership_choices, + "A", str) + add_input("Servers", + "What is the ownership mode?", i_ownership_mode) + + # client access behavior + access_rates = [ (1, "one file per day"), + (10, "10 files per day"), + (100, "100 files per day"), + (1000, "1k files per day"), + (10e3, "10k files per day"), + (100e3, "100k files per day"), + ] + download_files_per_day, i_download_rate = \ + get_and_set("download_rate", access_rates, + 100, int) + add_input("Users", + "How many files are downloaded per day?", i_download_rate) + download_rate = 1.0 * download_files_per_day / (24*60*60) + + upload_files_per_day, i_upload_rate = \ + get_and_set("upload_rate", access_rates, + 10, int) + add_input("Users", + "How many files are uploaded per day?", i_upload_rate) + upload_rate = 1.0 * upload_files_per_day / (24*60*60) + + delete_files_per_day, i_delete_rate = \ + get_and_set("delete_rate", access_rates, + 10, int) + add_input("Users", + "How many files are deleted per day?", i_delete_rate) + delete_rate = 1.0 * delete_files_per_day / (24*60*60) + + + # the value is in days + lease_timers = [ (1, "one refresh per day"), + (7, "one refresh per week"), + ] + lease_timer, i_lease = \ + get_and_set("lease_timer", lease_timers, + 7, int) + add_input("Users", + "How frequently do clients refresh files or accounts? 
" + "(if necessary)", + i_lease) + seconds_per_lease = 24*60*60*lease_timer + + check_timer_choices = [ (1, "every week"), + (4, "every month"), + (8, "every two months"), + (16, "every four months"), + ] + check_timer, i_check_timer = \ + get_and_set("check_timer", check_timer_choices, 4, int) + add_input("Users", + "How frequently should we check on each file?", + i_check_timer) + file_check_interval = check_timer * 7 * 24 * 3600 + + + if filled: + add_output("Users", T.div["Total users: %s" % number(num_users)]) + add_output("Users", + T.div["Files per user: %s" % number(files_per_user)]) + file_size = 1.0 * space_per_user / files_per_user + add_output("Users", + T.div["Average file size: ", number(file_size)]) + total_files = num_users * files_per_user / sharing_ratio + + add_output("Grid", + T.div["Total number of files in grid: ", + number(total_files)]) + total_space = num_users * space_per_user / sharing_ratio + add_output("Grid", + T.div["Total volume of plaintext in grid: ", + number(total_space, "B")]) + + total_shares = n * total_files + add_output("Grid", + T.div["Total shares in grid: ", number(total_shares)]) + expansion = float(n) / float(k) + + total_usage = expansion * total_space + add_output("Grid", + T.div["Share data in grid: ", number(total_usage, "B")]) + + if n > num_servers: + # silly configuration, causes Tahoe2 to wrap and put multiple + # shares on some servers. + add_output("Servers", + T.div["non-ideal: more shares than servers" + " (n=%d, servers=%d)" % (n, num_servers)]) + # every file has at least one share on every server + buckets_per_server = total_files + shares_per_server = total_files * ((1.0 * n) / num_servers) + else: + # if nobody is full, then no lease requests will be turned + # down for lack of space, and no two shares for the same file + # will share a server. Therefore the chance that any given + # file has a share on any given server is n/num_servers. + buckets_per_server = total_files * ((1.0 * n) / num_servers) + # since each such represented file only puts one share on a + # server, the total number of shares per server is the same. + shares_per_server = buckets_per_server + add_output("Servers", + T.div["Buckets per server: ", + number(buckets_per_server)]) + add_output("Servers", + T.div["Shares per server: ", + number(shares_per_server)]) + + # how much space is used on the storage servers for the shares? + # the share data itself + share_data_per_server = total_usage / num_servers + add_output("Servers", + T.div["Share data per server: ", + number(share_data_per_server, "B")]) + # this is determined empirically. H=hashsize=32, for a one-segment + # file and 3-of-10 encoding + share_validation_per_server = 266 * shares_per_server + # this could be 423*buckets_per_server, if we moved the URI + # extension into a separate file, but that would actually consume + # *more* space (minimum filesize is 4KiB), unless we moved all + # shares for a given bucket into a single file. + share_uri_extension_per_server = 423 * shares_per_server + + # ownership mode adds per-bucket data + H = 32 # depends upon the desired security of delete/refresh caps + # bucket_lease_size is the amount of data needed to keep track of + # the delete/refresh caps for each bucket. 
+ bucket_lease_size = 0 + client_bucket_refresh_rate = 0 + owner_table_size = 0 + if ownership_mode in ("B", "C", "D", "E"): + bucket_lease_size = sharing_ratio * 1.0 * H + if ownership_mode in ("B", "C"): + # refreshes per second per client + client_bucket_refresh_rate = (1.0 * n * files_per_user / + seconds_per_lease) + add_output("Users", + T.div["Client share refresh rate (outbound): ", + number(client_bucket_refresh_rate, "Hz")]) + server_bucket_refresh_rate = (client_bucket_refresh_rate * + num_users / num_servers) + add_output("Servers", + T.div["Server share refresh rate (inbound): ", + number(server_bucket_refresh_rate, "Hz")]) + if ownership_mode in ("D", "E"): + # each server must maintain a bidirectional mapping from + # buckets to owners. One way to implement this would be to + # put a list of four-byte owner numbers into each bucket, and + # a list of four-byte share numbers into each owner (although + # of course we'd really just throw it into a database and let + # the experts take care of the details). + owner_table_size = 2*(buckets_per_server * sharing_ratio * 4) + + if ownership_mode in ("E",): + # in this mode, clients must refresh one timer per server + client_account_refresh_rate = (1.0 * num_servers / + seconds_per_lease) + add_output("Users", + T.div["Client account refresh rate (outbound): ", + number(client_account_refresh_rate, "Hz")]) + server_account_refresh_rate = (client_account_refresh_rate * + num_users / num_servers) + add_output("Servers", + T.div["Server account refresh rate (inbound): ", + number(server_account_refresh_rate, "Hz")]) + + # TODO: buckets vs shares here is a bit wonky, but in + # non-wrapping grids it shouldn't matter + share_lease_per_server = bucket_lease_size * buckets_per_server + share_ownertable_per_server = owner_table_size + + share_space_per_server = (share_data_per_server + + share_validation_per_server + + share_uri_extension_per_server + + share_lease_per_server + + share_ownertable_per_server) + add_output("Servers", + T.div["Share space per server: ", + number(share_space_per_server, "B"), + " (data ", + number(share_data_per_server, "B"), + ", validation ", + number(share_validation_per_server, "B"), + ", UEB ", + number(share_uri_extension_per_server, "B"), + ", lease ", + number(share_lease_per_server, "B"), + ", ownertable ", + number(share_ownertable_per_server, "B"), + ")", + ]) + + + # rates + client_download_share_rate = download_rate * k + client_download_byte_rate = download_rate * file_size + add_output("Users", + T.div["download rate: shares = ", + number(client_download_share_rate, "Hz"), + " , bytes = ", + number(client_download_byte_rate, "Bps"), + ]) + total_file_check_rate = 1.0 * total_files / file_check_interval + client_check_share_rate = total_file_check_rate / num_users + add_output("Users", + T.div["file check rate: shares = ", + number(client_check_share_rate, "Hz"), + " (interval = %s)" % + number(1 / client_check_share_rate, "s"), + ]) + + client_upload_share_rate = upload_rate * n + # TODO: doesn't include overhead + client_upload_byte_rate = upload_rate * file_size * expansion + add_output("Users", + T.div["upload rate: shares = ", + number(client_upload_share_rate, "Hz"), + " , bytes = ", + number(client_upload_byte_rate, "Bps"), + ]) + client_delete_share_rate = delete_rate * n + + server_inbound_share_rate = (client_upload_share_rate * + num_users / num_servers) + server_inbound_byte_rate = (client_upload_byte_rate * + num_users / num_servers) + add_output("Servers", + T.div["upload rate 
(inbound): shares = ", + number(server_inbound_share_rate, "Hz"), + " , bytes = ", + number(server_inbound_byte_rate, "Bps"), + ]) + add_output("Servers", + T.div["share check rate (inbound): ", + number(total_file_check_rate * n / num_servers, + "Hz"), + ]) + + server_share_modify_rate = ((client_upload_share_rate + + client_delete_share_rate) * + num_users / num_servers) + add_output("Servers", + T.div["share modify rate: shares = ", + number(server_share_modify_rate, "Hz"), + ]) + + server_outbound_share_rate = (client_download_share_rate * + num_users / num_servers) + server_outbound_byte_rate = (client_download_byte_rate * + num_users / num_servers) + add_output("Servers", + T.div["download rate (outbound): shares = ", + number(server_outbound_share_rate, "Hz"), + " , bytes = ", + number(server_outbound_byte_rate, "Bps"), + ]) + + + total_share_space = num_servers * share_space_per_server + add_output("Grid", + T.div["Share space consumed: ", + number(total_share_space, "B")]) + add_output("Grid", + T.div[" %% validation: %.2f%%" % + (100.0 * share_validation_per_server / + share_space_per_server)]) + add_output("Grid", + T.div[" %% uri-extension: %.2f%%" % + (100.0 * share_uri_extension_per_server / + share_space_per_server)]) + add_output("Grid", + T.div[" %% lease data: %.2f%%" % + (100.0 * share_lease_per_server / + share_space_per_server)]) + add_output("Grid", + T.div[" %% owner data: %.2f%%" % + (100.0 * share_ownertable_per_server / + share_space_per_server)]) + add_output("Grid", + T.div[" %% share data: %.2f%%" % + (100.0 * share_data_per_server / + share_space_per_server)]) + add_output("Grid", + T.div["file check rate: ", + number(total_file_check_rate, + "Hz")]) + + total_drives = max(div_ceil(int(total_share_space), + int(drive_size)), + num_servers) + add_output("Drives", + T.div["Total drives: ", number(total_drives), " drives"]) + drives_per_server = div_ceil(total_drives, num_servers) + add_output("Servers", + T.div["Drives per server: ", drives_per_server]) + + # costs + if drive_size == 3000 * 1e9: + add_output("Servers", T.div["3000GB drive: $250 each"]) + drive_cost = 250 + else: + add_output("Servers", + T.div[T.b["unknown cost per drive, assuming $100"]]) + drive_cost = 100 + + if drives_per_server <= 4: + add_output("Servers", T.div["1U box with <= 4 drives: $1500"]) + server_cost = 1500 # typical 1U box + elif drives_per_server <= 12: + add_output("Servers", T.div["2U box with <= 12 drives: $2500"]) + server_cost = 2500 # 2U box + else: + add_output("Servers", + T.div[T.b["Note: too many drives per server, " + "assuming $3000"]]) + server_cost = 3000 + + server_capital_cost = (server_cost + drives_per_server * drive_cost) + total_server_cost = float(num_servers * server_capital_cost) + add_output("Servers", T.div["Capital cost per server: $", + server_capital_cost]) + add_output("Grid", T.div["Capital cost for all servers: $", + number(total_server_cost)]) + # $70/Mbps/mo + # $44/server/mo power+space + server_bandwidth = max(server_inbound_byte_rate, + server_outbound_byte_rate) + server_bandwidth_mbps = div_ceil(int(server_bandwidth*8), int(1e6)) + server_monthly_cost = 70*server_bandwidth_mbps + 44 + add_output("Servers", T.div["Monthly cost per server: $", + server_monthly_cost]) + add_output("Users", T.div["Capital cost per user: $", + number(total_server_cost / num_users)]) + + # reliability + any_drive_failure_rate = total_drives * drive_failure_rate + any_drive_MTBF = 1 // any_drive_failure_rate # in seconds + any_drive_MTBF_days = any_drive_MTBF / 
86400 + add_output("Drives", + T.div["MTBF (any drive): ", + number(any_drive_MTBF_days), " days"]) + drive_replacement_monthly_cost = (float(drive_cost) + * any_drive_failure_rate + *30*86400) + add_output("Grid", + T.div["Monthly cost of replacing drives: $", + number(drive_replacement_monthly_cost)]) + + total_server_monthly_cost = float(num_servers * server_monthly_cost + + drive_replacement_monthly_cost) + + add_output("Grid", T.div["Monthly cost for all servers: $", + number(total_server_monthly_cost)]) + add_output("Users", + T.div["Monthly cost per user: $", + number(total_server_monthly_cost / num_users)]) + + # availability + file_dBA = self.file_availability(k, n, server_dBA) + user_files_dBA = self.many_files_availability(file_dBA, + files_per_user) + all_files_dBA = self.many_files_availability(file_dBA, total_files) + add_output("Users", + T.div["availability of: ", + "arbitrary file = %d dBA, " % file_dBA, + "all files of user1 = %d dBA, " % user_files_dBA, + "all files in grid = %d dBA" % all_files_dBA, + ], + ) + + time_until_files_lost = (n-k+1) / any_drive_failure_rate + add_output("Grid", + T.div["avg time until files are lost: ", + number(time_until_files_lost, "s"), ", ", + number(time_until_files_lost/86400, " days"), + ]) + + share_data_loss_rate = any_drive_failure_rate * drive_size + add_output("Grid", + T.div["share data loss rate: ", + number(share_data_loss_rate,"Bps")]) + + # the worst-case survival numbers occur when we do a file check + # and the file is just above the threshold for repair (so we + # decide to not repair it). The question is then: what is the + # chance that the file will decay so badly before the next check + # that we can't recover it? The resulting probability is per + # check interval. + # Note that the chances of us getting into this situation are low. + P_disk_failure_during_interval = (drive_failure_rate * + file_check_interval) + disk_failure_dBF = 10*math.log10(P_disk_failure_during_interval) + disk_failure_dBA = -disk_failure_dBF + file_survives_dBA = self.file_availability(k, repair_threshold, + disk_failure_dBA) + user_files_survives_dBA = self.many_files_availability( \ + file_survives_dBA, files_per_user) + all_files_survives_dBA = self.many_files_availability( \ + file_survives_dBA, total_files) + add_output("Users", + T.div["survival of: ", + "arbitrary file = %d dBA, " % file_survives_dBA, + "all files of user1 = %d dBA, " % + user_files_survives_dBA, + "all files in grid = %d dBA" % + all_files_survives_dBA, + " (per worst-case check interval)", + ]) + + + + all_sections = [] + all_sections.append(build_section("Users")) + all_sections.append(build_section("Servers")) + all_sections.append(build_section("Drives")) + if "Grid" in sections: + all_sections.append(build_section("Grid")) + + f = T.form(action=".", method="post", enctype="multipart/form-data") + + if filled: + action = "Recompute" + else: + action = "Compute" + + f = f[T.input(type="hidden", name="filled", value="true"), + T.input(type="submit", value=action), + all_sections, + ] + + try: + from allmydata import reliability + # we import this just to test to see if the page is available + _hush_pyflakes = reliability + del _hush_pyflakes + f = [T.div[T.a(href="../reliability")["Reliability Math"]], f] + except ImportError: + pass + + return f + + def file_availability(self, k, n, server_dBA): + """ + The full formula for the availability of a specific file is:: + + 1 - sum([choose(N,i) * p**i * (1-p)**(N-i)] for i in range(k)]) + + Where choose(N,i) = N! / ( i! 
* (N-i)! ) . Note that each term of + this summation is the probability that there are exactly 'i' servers + available, and what we're doing is adding up the cases where i is too + low. + + This is a nuisance to calculate at all accurately, especially once N + gets large, and when p is close to unity. So we make an engineering + approximation: if (1-p) is very small, then each [i] term is much + larger than the [i-1] term, and the sum is dominated by the i=k-1 + term. This only works for (1-p) < 10%, and when the choose() function + doesn't rise fast enough to compensate. For high-expansion encodings + (3-of-10, 25-of-100), the choose() function is rising at the same + time as the (1-p)**(N-i) term, so that's not an issue. For + low-expansion encodings (7-of-10, 75-of-100) the two values are + moving in opposite directions, so more care must be taken. + + Note that the p**i term has only a minor effect as long as (1-p)*N is + small, and even then the effect is attenuated by the 1-p term. + """ + + assert server_dBA > 9 # >=90% availability to use the approximation + factor = binomial(n, k-1) + factor_dBA = 10 * math.log10(factor) + exponent = n - k + 1 + file_dBA = server_dBA * exponent - factor_dBA + return file_dBA + + def many_files_availability(self, file_dBA, num_files): + """The probability that 'num_files' independent bernoulli trials will + succeed (i.e. we can recover all files in the grid at any given + moment) is p**num_files . Since p is close to unity, we express in p + in dBA instead, so we can get useful precision on q (=1-p), and then + the formula becomes:: + + P_some_files_unavailable = 1 - (1 - q)**num_files + + That (1-q)**n expands with the usual binomial sequence, 1 - nq + + Xq**2 ... + Xq**n . We use the same approximation as before, since we + know q is close to zero, and we get to ignore all the terms past -nq. + """ + + many_files_dBA = file_dBA - 10 * math.log10(num_files) + return many_files_dBA diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/provisioning.xhtml tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/provisioning.xhtml --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/provisioning.xhtml 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/provisioning.xhtml 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,18 @@ + + + Tahoe-LAFS - Provisioning Tool + + + + + + +

+[Nevow XHTML template; its markup was lost in extraction, so only the
+visible text is reproduced here:]
+
+Tahoe-LAFS Provisioning Tool
+
+This page will help you determine how much disk space and network
+bandwidth will be required by various sizes and types of Tahoe-LAFS
+networks.
+
+[the parameter-entry form is rendered here]
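The availability approximation implemented by ``file_availability()`` in
provisioning.py above is easy to check numerically. A standalone sketch (an
illustration, not code from the tool)::

    import math

    def binomial(n, k):
        # n-choose-k, computed exactly
        return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))

    def file_availability_dBA(k, n, server_dBA):
        # dominant-term approximation: the failure sum is dominated by the
        # i = k-1 term when servers are at least ~90% available (>= 10 dBA)
        factor_dBA = 10 * math.log10(binomial(n, k - 1))
        return server_dBA * (n - k + 1) - factor_dBA

    # 3-of-10 encoding on 99%-available (20 dBA) servers: about 143 dBA
    print file_availability_dBA(3, 10, 20)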
+ + + diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/reliability.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/reliability.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/reliability.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/reliability.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,251 @@ +#! /usr/bin/python + +import math +from allmydata.util import statistics +from numpy import array, matrix, dot + +DAY=24*60*60 +MONTH=31*DAY +YEAR=365*DAY + +class ReliabilityModel: + """Generate a model of system-wide reliability, given several input + parameters. + + This runs a simulation in which time is quantized down to 'delta' seconds + (default is one month): a smaller delta will result in a more accurate + simulation, but will take longer to run. 'report_span' simulated seconds + will be run. + + The encoding parameters are provided as 'k' (minimum number of shares + needed to recover the file) and 'N' (total number of shares generated). + The default parameters are 3-of-10. + + The first step is to build a probability of individual drive loss during + any given delta. This uses a simple exponential model, in which the + average drive lifetime is specified by the 'drive_lifetime' parameter + (default is 8 years). + + The second step is to calculate a 'transition matrix': a table of + probabilities that shows, given A shares at the start of the delta, what + the chances are of having B shares left at the end of the delta. The + current code optimistically assumes all drives are independent. A + subclass could override that assumption. + + An additional 'repair matrix' is created to show what happens when the + Checker/Repairer is run. In the simulation, the Checker will be run every + 'check_period' seconds (default is one month), and the Repairer will be + run if it sees fewer than 'R' shares (default 7). + + The third step is to finally run the simulation. An initial probability + vector is created (with a 100% chance of N shares and a 0% chance of + fewer than N shares), then it is multiplied by the transition matrix for + every delta of time. Each time the Checker is to be run, the repair + matrix is multiplied in, and some additional stats are accumulated + (average number of repairs that occur, average number of shares + regenerated per repair). + + The output is a ReliabilityReport instance, which contains a table that + samples the state of the simulation once each 'report_period' seconds + (defaults to 3 months). Each row of this table will contain the + probability vector for one sample period (chance of having X shares, from + 0 to N, at the end of the period). The report will also contain other + information. 
+ + """ + + @classmethod + def run(klass, + drive_lifetime=8*YEAR, + k=3, R=7, N=10, + delta=1*MONTH, + check_period=1*MONTH, + report_period=3*MONTH, + report_span=5*YEAR, + ): + self = klass() + + check_period = check_period-1 + P = self.p_in_period(drive_lifetime, delta) + + decay = self.build_decay_matrix(N, P) + + repair = self.build_repair_matrix(k, N, R) + + #print "DECAY:", decay + #print "OLD-POST-REPAIR:", old_post_repair + #print "NEW-POST-REPAIR:", decay * repair + #print "REPAIR:", repair + #print "DIFF:", (old_post_repair - decay * repair) + + START = array([0]*N + [1]) + DEAD = array([1]*k + [0]*(1+N-k)) + REPAIRp = array([0]*k + [1]*(R-k) + [0]*(1+N-R)) + REPAIR_newshares = array([0]*k + + [N-i for i in range(k, R)] + + [0]*(1+N-R)) + assert REPAIR_newshares.shape[0] == N+1 + #print "START", START + #print "REPAIRp", REPAIRp + #print "REPAIR_newshares", REPAIR_newshares + + unmaintained_state = START + maintained_state = START + last_check = 0 + last_report = 0 + P_repaired_last_check_period = 0.0 + needed_repairs = [] + needed_new_shares = [] + report = ReliabilityReport() + + for t in range(0, report_span+delta, delta): + # the .A[0] turns the one-row matrix back into an array + unmaintained_state = (unmaintained_state * decay).A[0] + maintained_state = (maintained_state * decay).A[0] + if (t-last_check) > check_period: + last_check = t + # we do a check-and-repair this frequently + need_repair = dot(maintained_state, REPAIRp) + + P_repaired_last_check_period = need_repair + new_shares = dot(maintained_state, REPAIR_newshares) + needed_repairs.append(need_repair) + needed_new_shares.append(new_shares) + + maintained_state = (maintained_state * repair).A[0] + + if (t-last_report) > report_period: + last_report = t + P_dead_unmaintained = dot(unmaintained_state, DEAD) + P_dead_maintained = dot(maintained_state, DEAD) + cumulative_number_of_repairs = sum(needed_repairs) + cumulative_number_of_new_shares = sum(needed_new_shares) + report.add_sample(t, unmaintained_state, maintained_state, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) + + # record one more sample at the end of the run + P_dead_unmaintained = dot(unmaintained_state, DEAD) + P_dead_maintained = dot(maintained_state, DEAD) + cumulative_number_of_repairs = sum(needed_repairs) + cumulative_number_of_new_shares = sum(needed_new_shares) + report.add_sample(t, unmaintained_state, maintained_state, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) + + #def yandm(seconds): + # return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH)) + #needed_repairs_total = sum(needed_repairs) + #needed_new_shares_total = sum(needed_new_shares) + #print "at 2y:" + #print " unmaintained", unmaintained_state + #print " maintained", maintained_state + #print " number of repairs", needed_repairs_total + #print " new shares generated", needed_new_shares_total + #repair_rate_inv = report_span / needed_repairs_total + #print " avg repair rate: once every %s" % yandm(repair_rate_inv) + #print " avg repair download: one share every %s" % yandm(repair_rate_inv/k) + #print " avg repair upload: one share every %s" % yandm(report_span / needed_new_shares_total) + + return report + + def p_in_period(self, avg_lifetime, period): + """Given an average lifetime of a disk (using an exponential model), + what is the chance that a live disk will survive the next 
'period' + seconds?""" + + # eg p_in_period(8*YEAR, MONTH) = 98.94% + return math.exp(-1.0*period/avg_lifetime) + + def build_decay_matrix(self, N, P): + """Return a decay matrix. decay[start_shares][end_shares] is the + conditional probability of finishing with end_shares, given that we + started with start_shares.""" + decay_rows = [] + decay_rows.append( [0.0]*(N+1) ) + for start_shares in range(1, (N+1)): + end_shares = self.build_decay_row(start_shares, P) + decay_row = end_shares + [0.0] * (N-start_shares) + assert len(decay_row) == (N+1), len(decay_row) + decay_rows.append(decay_row) + + decay = matrix(decay_rows) + return decay + + def build_decay_row(self, start_shares, P): + """Return a decay row 'end_shares'. end_shares[i] is the chance that + we finish with i shares, given that we started with start_shares, for + all i between 0 and start_shares, inclusive. This implementation + assumes that all shares are independent (IID), but a more complex + model could incorporate inter-share failure correlations like having + two shares on the same server.""" + end_shares = statistics.binomial_distribution_pmf(start_shares, P) + return end_shares + + def build_repair_matrix(self, k, N, R): + """Return a repair matrix. repair[start][end]: is the conditional + probability of the repairer finishing with 'end' shares, given that + it began with 'start' shares (repair if fewer than R shares). The + repairer's behavior is deterministic, so all values in this matrix + are either 0 or 1. This matrix should be applied *after* the decay + matrix.""" + new_repair_rows = [] + for start_shares in range(0, N+1): + new_repair_row = [0] * (N+1) + if start_shares < k: + new_repair_row[start_shares] = 1 + elif start_shares < R: + new_repair_row[N] = 1 + else: + new_repair_row[start_shares] = 1 + new_repair_rows.append(new_repair_row) + + repair = matrix(new_repair_rows) + return repair + +class ReliabilityReport: + def __init__(self): + self.samples = [] + + def add_sample(self, when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained): + """ + when: the timestamp at the end of the report period + unmaintained_shareprobs: a vector of probabilities, element[S] + is the chance that there are S shares + left at the end of the report period. + This tracks what happens if no repair + is ever done. + maintained_shareprobs: same, but for 'maintained' grids, where + check and repair is done at the end + of each check period + P_repaired_last_check_period: a float, with the probability + that a repair was performed + at the end of the most recent + check period. 
+        cumulative_number_of_repairs: a float, with the average number
+                                      of repairs that will have been
+                                      performed by the end of the
+                                      report period
+        cumulative_number_of_new_shares: a float, with the average number
+                                         of new shares that repair processes
+                                         generated by the end of the report
+                                         period
+        P_dead_unmaintained: a float, with the chance that the file will
+                             be unrecoverable at the end of the period
+        P_dead_maintained: same, but for maintained grids
+
+        """
+        row = (when, unmaintained_shareprobs, maintained_shareprobs,
+               P_repaired_last_check_period,
+               cumulative_number_of_repairs,
+               cumulative_number_of_new_shares,
+               P_dead_unmaintained, P_dead_maintained)
+        self.samples.append(row)
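A quick way to exercise this model (a sketch, assuming ``reliability.py`` and
its numpy/allmydata dependencies are importable from the working directory)
runs the simulation with the documented defaults and prints the headline
probabilities::

    from reliability import ReliabilityModel, YEAR, MONTH

    report = ReliabilityModel.run(drive_lifetime=8*YEAR, k=3, R=7, N=10,
                                  delta=1*MONTH, check_period=1*MONTH,
                                  report_period=3*MONTH, report_span=5*YEAR)
    for row in report.samples:
        # row layout matches ReliabilityReport.add_sample() above
        when, P_dead_unmaintained, P_dead_maintained = row[0], row[6], row[7]
        print "t=%.2fy P_dead(unmaintained)=%.3g P_dead(maintained)=%.3g" % (
            when / float(YEAR), P_dead_unmaintained, P_dead_maintained)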

Tahoe-LAFS Reliability Tool
+
+
+Given certain assumptions, this page calculates the probability of share loss
+over time, to help make informed decisions about how much redundancy and
+repair bandwidth to configure on a Tahoe-LAFS grid.
+
+
+
+Simulation Results
+
+
+At the end of the report span (elapsed time ), the simulated file had the
+following properties:
+
+
+  • Probability of loss (no maintenance):
+  • Probability of loss (with maintenance):
+  • Average repair frequency: once every secs
+  • Average shares generated per repair:
+
+
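
Each of these four summary lines is filled in from the final sample row of the
model's report. A rough sketch (not part of the patch) of where the values come
from, assuming NumPy is available and borrowing the run() arguments and import
path used by test_provisioning.py further down:

    from allmydata.reliability import ReliabilityModel  # needs NumPy

    DAY = 24*60*60; MONTH = 31*DAY; YEAR = 365*DAY
    r = ReliabilityModel.run(delta=100000, report_period=3*MONTH,
                             report_span=5*YEAR)
    (when, unmaintained_shareprobs, maintained_shareprobs,
     P_repaired_last_check_period,
     cumulative_number_of_repairs,
     cumulative_number_of_new_shares,
     P_dead_unmaintained, P_dead_maintained) = r.samples[-1]

    print("Probability of loss (no maintenance):   %.6g" % P_dead_unmaintained)
    print("Probability of loss (with maintenance): %.6g" % P_dead_maintained)
    print("Average repair frequency: once every %.6g secs"
          % (when / cumulative_number_of_repairs))
    print("Average shares generated per repair: %1.2f"
          % (cumulative_number_of_new_shares / cumulative_number_of_repairs))

The repair-frequency and shares-per-repair formulas mirror the
render_P_repair_rate and render_P_repair_shares methods in web_reliability.py
below.
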

This table shows how the following properties change over time:
+
+
+  • P_repair: the chance that a repair was performed in the most recent
+    check period.
+  • P_dead (unmaintained): the chance that the file will be unrecoverable
+    without periodic check+repair
+  • P_dead (maintained): the chance that the file will be unrecoverable even
+    with periodic check+repair
+
+
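
Because a file is recoverable iff at least k shares survive, P_dead is just the
probability mass on share counts below k, or equivalently one minus a dot
product against a 0/1 recoverable-states vector. An illustrative check (not
part of the patch), using the same toy three-state example as
test_provisioning.py further down, where states are 0, 1, or 2 shares, we start
certain of 2 shares, and k=1:

    import numpy

    decay = numpy.matrix([[1,   0,   0 ],
                          [.1,  .9,  0 ],
                          [.01, .09, .9]])
    start = numpy.array([0, 0, 1])
    g2 = (start * decay).A[0]                # distribution after one decay step
    recoverable = numpy.array([0, 1, 1])     # state s is recoverable iff s >= k
    print(1.0 - numpy.dot(g2, recoverable))  # 0.01
    print(sum(g2[:1]))                       # same answer: mass below k=1
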
+
+ %s
+
+  t | P_repair | P_dead (unmaintained) | P_dead (maintained)
+  no simulation data!
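
The table rows can be reproduced outside the WUI by continuing the sketch above
and iterating r.samples; yandm() mirrors the elapsed-time formatter defined in
web_reliability.py below:

    def yandm(seconds):
        return "%dy.%dm" % (int(seconds/YEAR), int((seconds % YEAR)/MONTH))

    print("%-8s %-10s %-22s %-20s"
          % ("t", "P_repair", "P_dead (unmaintained)", "P_dead (maintained)"))
    for row in r.samples:
        (when, unmaintained, maintained, P_repair, n_repairs, n_new_shares,
         P_dead_u, P_dead_m) = row
        print("%-8s %-10.6f %-22.6g %-20.6g"
              % (yandm(when), P_repair, P_dead_u, P_dead_m))
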
+ + + + diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/run.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/run.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/run.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/run.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +# this depends upon Twisted and Nevow, but not upon Tahoe itself + +import webbrowser + +from twisted.application import strports +from twisted.internet import reactor +from nevow import appserver, rend, loaders +from twisted.web import static +import web_reliability, provisioning + +class Root(rend.Page): + docFactory = loaders.xmlstr('''\ + + + Tahoe-LAFS Provisioning/Reliability Calculator + + + +

+    Reliability Tool
+
+    Provisioning Tool

+ + +''') + + child_reliability = web_reliability.ReliabilityTool() + child_provisioning = provisioning.ProvisioningTool() + + +def run(portnum): + root = Root() + root.putChild("tahoe.css", static.File("tahoe.css")) + site = appserver.NevowSite(root) + s = strports.service("tcp:%d" % portnum, site) + s.startService() + reactor.callLater(1.0, webbrowser.open, "http://localhost:%d/" % portnum) + reactor.run() + +if __name__ == '__main__': + import sys + portnum = 8070 + if len(sys.argv) > 1: + portnum = int(sys.argv[1]) + run(portnum) diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/tahoe.css tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/tahoe.css --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/tahoe.css 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/tahoe.css 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,163 @@ + +pre.overflow { + background: #f7f7f7; + border: 1px solid #d7d7d7; + margin: 1em 1.75em; + padding: .25em; + overflow: auto; + } + +/* ----------------------------------------------------------------------- */ + +/* colors borrowed from the Allmydata logo */ + +/* general style */ +h1 { + text-align: center; +} +table { + margin: 1em auto; + border: .2em solid #3289b4; + border-spacing: 1px; +} +th { + color: white; + background-color: #58a1c3; +} +td { + padding: .3em .3em; +} + +th { + padding: .3em .3em; +} + +.table-headings-top th { + text-align: center; + +} +.table-headings-left th { + text-align: right; + vertical-align: top; +} +legend { + font-weight: bold; +} + +.connected-yes, .connected-True { + border: 1px solid #75d24a; + background-color: #EFE; +} +.connected-no, .connected-False { + border: 1px solid #F00; + background-color: #FBB; +} + +.encoded, .nodeid { + font-family: monospace; + font-size: 80%; +} + +.empty-marker { + background-color: white; + color: gray; +} +table td.empty-marker { + padding: 6em 10em; + text-align: center; + vertical-align: center; +} + +/* styles for server listings in tables (nickname above nodeid) */ +th.nickname-and-peerid { + text-align: left; +} +.nickname { + font: inherit; + font-family: sans-serif; + font-weight: bold; +} + + +/* just in case, make sure floats don't stomp on big tables etc. 
*/ +#section { clear: both; } + +/* section-specific styles - turn this client info into a sidebar */ +#this-client { + font-size: 60%; + border: .2em solid #3289b4; + float: right; + width: 40%; + margin: 0 0 .5em .5em; + padding: 3px; +} +#this-client .nodeid { font-size: inherit; } +#this-client h2 { + text-align: center; + background: #3289b4; + color: white; + margin: -2px -2px 0 -2px; /* matches padding */ + padding: .3em; +} +#this-client table { + font-size: inherit; + margin: 0 -3px -3px -3px; /* matches padding */ +} +#this-client td > ul { + list-style-type: outside; + margin: 0 0 0 2.3em; + padding-left: 0; +} + + +/* services table */ +.services { +} + +/* --- Directory page styles --- */ + +body.tahoe-directory-page { + color: black; + background: #c0d9e6; + margin: 1em 0; /* zero margin so the table can be flush */ +} +table.tahoe-directory { + color: black; + background: white; + width: 100%; + /*border-left-color: #D7E0E5; + border-right-color: #D7E0E5;*/ + border-left: 0; + border-right: 0; +} +.tahoe-directory-footer { + color: black; + background: #c0d9e6; + margin: 0 1em; /* compensate for page 0 margin */ +} + +/* directory-screen toolbar */ +.toolbar { + display: table; + margin: .2em auto; + text-align: center; + /*width: 100%;*/ +} +.toolbar .toolbar-item { + display: inline; + text-align: center; + padding: 0 1em; +} + +/* recent upload/download status pages */ + +table.status-download-events { + #border: 1px solid #aaa; + margin: 1em auto; + border: .2em solid #3289b4; + border-spacing: 1px; +} +table.status-download-events td { + border: 1px solid #a00; + padding: 2px +} diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/test_provisioning.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/test_provisioning.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/test_provisioning.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/test_provisioning.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,115 @@ + +import unittest +from allmydata import provisioning +ReliabilityModel = None +try: + from allmydata.reliability import ReliabilityModel +except ImportError: + pass # might not be importable, since it needs NumPy + +from nevow import inevow +from zope.interface import implements + +class MyRequest: + implements(inevow.IRequest) + pass + +class Provisioning(unittest.TestCase): + def getarg(self, name, astype=int): + if name in self.fields: + return astype(self.fields[name]) + return None + + def test_load(self): + pt = provisioning.ProvisioningTool() + self.fields = {} + #r = MyRequest() + #r.fields = self.fields + #ctx = RequestContext() + #unfilled = pt.renderSynchronously(ctx) + lots_of_stan = pt.do_forms(self.getarg) + self.failUnless(lots_of_stan is not None) + + self.fields = {'filled': True, + "num_users": 50e3, + "files_per_user": 1000, + "space_per_user": 1e9, + "sharing_ratio": 1.0, + "encoding_parameters": "3-of-10-5", + "num_servers": 30, + "ownership_mode": "A", + "download_rate": 100, + "upload_rate": 10, + "delete_rate": 10, + "lease_timer": 7, + } + #filled = pt.renderSynchronously(ctx) + more_stan = pt.do_forms(self.getarg) + self.failUnless(more_stan is not None) + + # trigger the wraparound configuration + self.fields["num_servers"] = 5 + #filled = pt.renderSynchronously(ctx) + more_stan = pt.do_forms(self.getarg) + + # and other ownership modes + self.fields["ownership_mode"] = "B" + more_stan = pt.do_forms(self.getarg) + self.fields["ownership_mode"] = "E" + more_stan = 
pt.do_forms(self.getarg) + + def test_provisioning_math(self): + self.failUnlessEqual(provisioning.binomial(10, 0), 1) + self.failUnlessEqual(provisioning.binomial(10, 1), 10) + self.failUnlessEqual(provisioning.binomial(10, 2), 45) + self.failUnlessEqual(provisioning.binomial(10, 9), 10) + self.failUnlessEqual(provisioning.binomial(10, 10), 1) + +DAY=24*60*60 +MONTH=31*DAY +YEAR=365*DAY + +class Reliability(unittest.TestCase): + def test_basic(self): + if ReliabilityModel is None: + raise unittest.SkipTest("reliability model requires NumPy") + + # test that numpy math works the way I think it does + import numpy + decay = numpy.matrix([[1,0,0], + [.1,.9,0], + [.01,.09,.9], + ]) + start = numpy.array([0,0,1]) + g2 = (start * decay).A[0] + self.failUnlessEqual(repr(g2), repr(numpy.array([.01,.09,.9]))) + g3 = (g2 * decay).A[0] + self.failUnlessEqual(repr(g3), repr(numpy.array([.028,.162,.81]))) + + # and the dot product + recoverable = numpy.array([0,1,1]) + P_recoverable_g2 = numpy.dot(g2, recoverable) + self.failUnlessAlmostEqual(P_recoverable_g2, .9 + .09) + P_recoverable_g3 = numpy.dot(g3, recoverable) + self.failUnlessAlmostEqual(P_recoverable_g3, .81 + .162) + + r = ReliabilityModel.run(delta=100000, + report_period=3*MONTH, + report_span=5*YEAR) + self.failUnlessEqual(len(r.samples), 20) + + last_row = r.samples[-1] + #print last_row + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = last_row + self.failUnless(isinstance(P_repaired_last_check_period, float)) + self.failUnless(isinstance(P_dead_unmaintained, float)) + self.failUnless(isinstance(P_dead_maintained, float)) + self.failUnlessAlmostEqual(P_dead_unmaintained, 0.033591004555395272) + self.failUnlessAlmostEqual(P_dead_maintained, 3.2983995819177542e-08) + +if __name__=='__main__': + unittest.main() diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/util.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/util.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/util.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/util.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,5 @@ + +import os.path + +def sibling(filename): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), filename) diff -Nru tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/web_reliability.py tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/web_reliability.py --- tahoe-lafs-1.9.2/misc/operations_helpers/provisioning/web_reliability.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/misc/operations_helpers/provisioning/web_reliability.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,168 @@ + +from nevow import rend, loaders, tags as T +from nevow.inevow import IRequest +import reliability # requires NumPy +import util + +def get_arg(ctx_or_req, argname, default=None, multiple=False): + """Extract an argument from either the query args (req.args) or the form + body fields (req.fields). If multiple=False, this returns a single value + (or the default, which defaults to None), and the query args take + precedence. If multiple=True, this returns a tuple of arguments (possibly + empty), starting with all those in the query args. 
+ """ + req = IRequest(ctx_or_req) + results = [] + if argname in req.args: + results.extend(req.args[argname]) + if req.fields and argname in req.fields: + results.append(req.fields[argname].value) + if multiple: + return tuple(results) + if results: + return results[0] + return default + + +DAY=24*60*60 +MONTH=31*DAY +YEAR=365*DAY + +def is_available(): + if reliability: + return True + return False + +def yandm(seconds): + return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH)) + +class ReliabilityTool(rend.Page): + addSlash = True + docFactory = loaders.xmlfile(util.sibling("reliability.xhtml")) + + DEFAULT_PARAMETERS = [ + ("drive_lifetime", "8Y", "time", + "Average drive lifetime"), + ("k", 3, "int", + "Minimum number of shares needed to recover the file"), + ("R", 7, "int", + "Repair threshold: repair will not occur until fewer than R shares " + "are left"), + ("N", 10, "int", + "Total number of shares of the file generated"), + ("delta", "1M", "time", "Amount of time between each simulation step"), + ("check_period", "1M", "time", + "How often to run the checker and repair if fewer than R shares"), + ("report_period", "3M", "time", + "Amount of time between result rows in this report"), + ("report_span", "5Y", "time", + "Total amount of time covered by this report"), + ] + + def parse_time(self, s): + if s.endswith("M"): + return int(s[:-1]) * MONTH + if s.endswith("Y"): + return int(s[:-1]) * YEAR + return int(s) + + def format_time(self, s): + if s%YEAR == 0: + return "%dY" % (s/YEAR) + if s%MONTH == 0: + return "%dM" % (s/MONTH) + return "%d" % s + + def get_parameters(self, ctx): + parameters = {} + for (name,default,argtype,description) in self.DEFAULT_PARAMETERS: + v = get_arg(ctx, name, default) + if argtype == "time": + value = self.parse_time(v) + else: + value = int(v) + parameters[name] = value + return parameters + + def renderHTTP(self, ctx): + self.parameters = self.get_parameters(ctx) + self.results = reliability.ReliabilityModel.run(**self.parameters) + return rend.Page.renderHTTP(self, ctx) + + def make_input(self, name, old_value): + return T.input(name=name, type="text", size="5", + value=self.format_time(old_value)) + + def render_forms(self, ctx, data): + f = T.form(action=".", method="get") + table = [] + for (name,default_value,argtype,description) in self.DEFAULT_PARAMETERS: + old_value = self.parameters[name] + i = self.make_input(name, old_value) + table.append(T.tr[T.td[name+":"], T.td[i], T.td[description]]) + go = T.input(type="submit", value="Recompute") + return [T.h2["Simulation Parameters:"], + f[T.table[table], go], + ] + + def data_simulation_table(self, ctx, data): + for row in self.results.samples: + yield row + + def render_simulation_row(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = row + ctx.fillSlots("t", yandm(when)) + ctx.fillSlots("P_repair", "%.6f" % P_repaired_last_check_period) + ctx.fillSlots("P_dead_unmaintained", "%.6g" % P_dead_unmaintained) + ctx.fillSlots("P_dead_maintained", "%.6g" % P_dead_maintained) + return ctx.tag + + def render_report_span(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] + return ctx.tag[yandm(when)] + + def 
render_P_loss_unmaintained(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] + return ctx.tag["%.6g (%1.8f%%)" % (P_dead_unmaintained, + 100*P_dead_unmaintained)] + + def render_P_loss_maintained(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] + return ctx.tag["%.6g (%1.8f%%)" % (P_dead_maintained, + 100*P_dead_maintained)] + + def render_P_repair_rate(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] + freq = when / cumulative_number_of_repairs + return ctx.tag["%.6g" % freq] + + def render_P_repair_shares(self, ctx, row): + (when, unmaintained_shareprobs, maintained_shareprobs, + P_repaired_last_check_period, + cumulative_number_of_repairs, + cumulative_number_of_new_shares, + P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] + generated_shares = cumulative_number_of_new_shares / cumulative_number_of_repairs + return ctx.tag["%1.2f" % generated_shares] + + diff -Nru tahoe-lafs-1.9.2/relnotes.txt tahoe-lafs-1.10.0/relnotes.txt --- tahoe-lafs-1.9.2/relnotes.txt 2012-07-03 16:28:28.000000000 +0000 +++ tahoe-lafs-1.10.0/relnotes.txt 2013-09-03 15:38:27.000000000 +0000 @@ -1,7 +1,7 @@ -ANNOUNCING Tahoe, the Least-Authority File System, v1.9.2 +ANNOUNCING Tahoe, the Least-Authority File System, v1.10 The Tahoe-LAFS team is pleased to announce the immediate -availability of version 1.9.2 of Tahoe-LAFS, an extremely +availability of version 1.10.0 of Tahoe-LAFS, an extremely reliable distributed storage system. Get it here: https://tahoe-lafs.org/source/tahoe-lafs/trunk/docs/quickstart.rst @@ -14,11 +14,14 @@ https://tahoe-lafs.org/source/tahoe-lafs/trunk/docs/about.rst -The previous stable release of Tahoe-LAFS was v1.9.1, released -on January 12, 2012. +The previous stable release of Tahoe-LAFS was v1.9.2, released +on July 3, 2012. -v1.9.2 is a bugfix release that primarily fixes regressions -in mutable file support. See the NEWS file [1] for details. +v1.10.0 is a feature release which adds a new Introducer +protocol, improves the appearance of the web-based user +interface, improves grid security by making introducer FURLs +unguessable, and fixes many bugs. See the NEWS file [1] for +details. WHAT IS IT GOOD FOR? @@ -51,7 +54,7 @@ COMPATIBILITY -This release is compatible with the version 1 series of +This release should be compatible with the version 1 series of Tahoe-LAFS. Clients from this release can write files and directories in the format used by clients of all versions back to v1.0 (which was released March 25, 2008). Clients from this @@ -60,7 +63,18 @@ clients of all versions back to v1.0 and clients from this release can use servers of all versions back to v1.0. -This is the seventeenth release in the version 1 series. This +Except for the new optional MDMF format, we have not made any +intentional compatibility changes. However we do not yet have +the test infrastructure to continuously verify that all new +versions are interoperable with previous versions. 
We intend +to build such an infrastructure in the future. + +The new Introducer protocol added in v1.10 is backwards +compatible with older clients and introducer servers; however, +some features will be unavailable when an older node is +involved. Please see docs/nodekeys.rst [14] for details. + +This is the eighteenth release in the version 1 series. This +series of Tahoe-LAFS will be actively supported and maintained for the foreseeable future, and future versions of Tahoe-LAFS will retain the ability to read and write files compatible @@ -124,16 +138,16 @@ ACKNOWLEDGEMENTS -This is the eleventh release of Tahoe-LAFS to be created solely +This is the twelfth release of Tahoe-LAFS to be created solely as a labor of love by volunteers. Thank you very much to the team of "hackers in the public interest" who make Tahoe-LAFS possible. -David-Sarah Hopwood +Brian Warner on behalf of the Tahoe-LAFS team -July 3, 2012 -Rainhill, Merseyside, UK +May 1, 2013 +San Francisco, California, USA [1] https://tahoe-lafs.org/trac/tahoe-lafs/browser/NEWS.rst @@ -149,3 +163,4 @@ [11] http://atlasnetworks.us/ [12] https://leastauthority.com/ [13] https://tahoe-lafs.org/hacktahoelafs/ +[14] https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/nodekeys.rst diff -Nru tahoe-lafs-1.9.2/setup.cfg tahoe-lafs-1.10.0/setup.cfg --- tahoe-lafs-1.9.2/setup.cfg 2012-07-03 18:51:11.000000000 +0000 +++ tahoe-lafs-1.10.0/setup.cfg 2013-09-03 15:38:27.000000000 +0000 @@ -1,20 +1,20 @@ [easy_install] zip_ok = False find_links = misc/dependencies tahoe-deps ../tahoe-deps - https://tahoe-lafs.org/source/tahoe-lafs/deps/tahoe-dep-sdists/ + https://tahoe-lafs.org/source/tahoe-lafs/deps/tahoe-lafs-dep-sdists/ https://tahoe-lafs.org/source/tahoe-lafs/deps/tahoe-lafs-dep-eggs/ -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - [aliases] -sdist_dsc = update_version sdist_dsc -sdist = update_version sdist -trial = update_version trial build = update_version develop --prefix=support make_executable build -install = update_version install test = update_version develop --prefix=support make_executable build trial +sdist = update_version sdist +install = update_version install bdist_egg = update_version bdist_egg +trial = update_version trial +sdist_dsc = update_version sdist_dsc + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 diff -Nru tahoe-lafs-1.9.2/setup.py tahoe-lafs-1.10.0/setup.py --- tahoe-lafs-1.9.2/setup.py 2012-05-31 22:25:33.000000000 +0000 +++ tahoe-lafs-1.10.0/setup.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,6 @@ #! /usr/bin/env python # -*- coding: utf-8 -*- -u"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.4.4 and 2.7.x inclusive." +import sys; assert sys.version_info < (3,), ur"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.6 and 2.7.x inclusive." # Tahoe-LAFS -- secure, distributed storage grid # @@ -10,7 +10,7 @@ # # See the docs/about.rst file for licensing information. -import glob, os, stat, subprocess, sys, re +import glob, os, stat, subprocess, re ##### sys.path management @@ -49,7 +49,7 @@ open(APPNAMEFILE, "w").write(APPNAMEFILESTR) else: if curappnamefilestr.strip() != APPNAMEFILESTR: - print "Error -- this setup.py file is configured with the 'application name' to be '%s', but there is already a file in place in '%s' which contains the contents '%s'. If the file is wrong, please remove it and setup.py will regenerate it and write '%s' into it."
% (APPNAME, APPNAMEFILE, curappnamefilestr, APPNAMEFILESTR) + print("Error -- this setup.py file is configured with the 'application name' to be '%s', but there is already a file in place in '%s' which contains the contents '%s'. If the file is wrong, please remove it and setup.py will regenerate it and write '%s' into it." % (APPNAME, APPNAMEFILE, curappnamefilestr, APPNAMEFILESTR)) sys.exit(-1) # setuptools/zetuptoolz looks in __main__.__requires__ for a list of @@ -72,8 +72,6 @@ egg = os.path.realpath(glob.glob('setuptools-*.egg')[0]) sys.path.insert(0, egg) -egg = os.path.realpath(glob.glob('darcsver-*.egg')[0]) -sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg from setuptools import setup @@ -120,20 +118,6 @@ setup_requires = [] -# The darcsver command from the darcsver plugin is needed to initialize the -# distribution's .version attribute correctly. (It does this either by -# examining darcs history, or if that fails by reading the -# src/allmydata/_version.py file). darcsver will also write a new version -# stamp in src/allmydata/_version.py, with a version number derived from -# darcs history. Note that the setup.cfg file has an "[aliases]" section -# which enumerates commands that you might run and specifies that it will run -# darcsver before each one. If you add different commands (or if I forgot -# some that are already in use), you may need to add it to setup.cfg and -# configure it to run darcsver before your command, if you want the version -# number to be correct when that command runs. -# http://pypi.python.org/pypi/darcsver -setup_requires.append('darcsver >= 1.7.2') - # Nevow imports itself when building, which causes Twisted and zope.interface # to be imported. We need to make sure that the versions of Twisted and # zope.interface used at build time satisfy Nevow's requirements. If not @@ -256,15 +240,6 @@ raise -DARCS_VERSION_BODY = ''' -# This _version.py is generated from darcs metadata by the tahoe setup.py -# and the "darcsver" package. - -__pkgname__ = "%(pkgname)s" -verstr = "%(pkgversion)s" -__version__ = verstr -''' - GIT_VERSION_BODY = ''' # This _version.py is generated from git metadata by the tahoe setup.py. @@ -279,15 +254,15 @@ try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd) - except EnvironmentError, e: + except EnvironmentError as e: # if this gives a SyntaxError, note that Tahoe-LAFS requires Python 2.6+ if verbose: - print "unable to run %s" % args[0] - print e + print("unable to run %s" % args[0]) + print(e) return None stdout = p.communicate()[0].strip() if p.returncode != 0: if verbose: - print "unable to run %s (error)" % args[0] + print("unable to run %s (error)" % args[0]) return None return stdout @@ -326,7 +301,7 @@ return {} if not stdout.startswith(tag_prefix): if verbose: - print "tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix) + print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) return {} version = stdout[len(tag_prefix):] pieces = version.split("-") @@ -343,6 +318,11 @@ normalized_version += ".dev0" return {"version": version, "normalized": normalized_version, "full": full} +# setup.cfg has an [aliases] section which runs "update_version" before many +# commands (like "build" and "sdist") that need to know our package version +# ahead of time. 
If you add different commands (or if we forgot some), you +# may need to add it to setup.cfg and configure it to run update_version +# before your command. class UpdateVersion(Command): description = "update _version.py from revision-control metadata" @@ -353,38 +333,26 @@ def finalize_options(self): pass def run(self): - target = self.distribution.versionfiles[0] - if os.path.isdir(os.path.join(basedir, "_darcs")): - verstr = self.try_from_darcs(target) - elif os.path.isdir(os.path.join(basedir, ".git")): - verstr = self.try_from_git(target) + if os.path.isdir(os.path.join(basedir, ".git")): + verstr = self.try_from_git() else: - print "no version-control data found, leaving _version.py alone" + print("no version-control data found, leaving _version.py alone") return if verstr: self.distribution.metadata.version = verstr - def try_from_darcs(self, target): - from darcsver.darcsvermodule import update - (rc, verstr) = update(pkgname=self.distribution.get_name(), - verfilename=self.distribution.versionfiles, - revision_number=True, - version_body=DARCS_VERSION_BODY) - if rc == 0: - return verstr - - def try_from_git(self, target): + def try_from_git(self): versions = versions_from_git("allmydata-tahoe-", verbose=True) if versions: - for fn in self.distribution.versionfiles: - f = open(fn, "wb") - f.write(GIT_VERSION_BODY % - { "pkgname": self.distribution.get_name(), - "version": versions["version"], - "normalized": versions["normalized"], - "full": versions["full"] }) - f.close() - print "git-version: wrote '%s' into '%s'" % (versions["version"], fn) + fn = 'src/allmydata/_version.py' + f = open(fn, "wb") + f.write(GIT_VERSION_BODY % + { "pkgname": self.distribution.get_name(), + "version": versions["version"], + "normalized": versions["normalized"], + "full": versions["full"] }) + f.close() + print("git-version: wrote '%s' into '%s'" % (versions["version"], fn)) return versions.get("normalized", None) @@ -467,6 +435,7 @@ 'allmydata.util', 'allmydata.web', 'allmydata.web.static', + 'allmydata.web.static.css', 'allmydata.windows', 'buildtest'], classifiers=trove_classifiers, @@ -475,10 +444,10 @@ tests_require=tests_require, package_data={"allmydata.web": ["*.xhtml"], "allmydata.web.static": ["*.js", "*.png", "*.css"], + "allmydata.web.static.css": ["*.css"], }, setup_requires=setup_requires, entry_points = { 'console_scripts': [ 'tahoe = allmydata.scripts.runner:run' ] }, zip_safe=False, # We prefer unzipped for easier access. - versionfiles=['src/allmydata/_version.py',], **setup_args ) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/PKG-INFO tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/PKG-INFO --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/PKG-INFO 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -Metadata-Version: 1.0 -Name: setuptools -Version: 0.6c16dev3 -Summary: Download, build, install, upgrade, and uninstall Python packages -- easily! (zetuptoolz fork) -Home-page: http://pypi.python.org/pypi/setuptools -Author: Phillip J. Eby -Author-email: distutils-sig@python.org -License: PSF or ZPL -Description: ====================== - This is not Setuptools - ====================== - - This is the ``zetuptoolz`` fork of setuptools, which is used to install - `Tahoe-LAFS`_. It has a `darcs source repository`_ and `issue tracker`_. - - For a list of differences between this fork and setuptools, see zetuptoolz.txt. 
- - Note that, to avoid interfering with any setuptools installation, zetuptoolz - does not install a script called ``easy_install``. There is an ``easy_install_z`` - script, but that is intended only for developers to test differences between - setuptools and zetuptoolz. - - .. _Tahoe-LAFS: http://tahoe-lafs.org/ - .. _darcs source repository: http://tahoe-lafs.org/source/zetuptoolz/trunk - .. _issue tracker: http://tahoe-lafs.org/trac/zetuptoolz - - - -------------------------------- - Using Setuptools and EasyInstall - -------------------------------- - - Here are some of the available manuals, tutorials, and other resources for - learning about Setuptools, Python Eggs, and EasyInstall: - - * `The EasyInstall user's guide and reference manual`_ - * `The setuptools Developer's Guide`_ - * `The pkg_resources API reference`_ - * `Package Compatibility Notes`_ (user-maintained) - * `The Internal Structure of Python Eggs`_ - - Questions, comments, and bug reports should be directed to the `distutils-sig - mailing list`_. If you have written (or know of) any tutorials, documentation, - plug-ins, or other resources for setuptools users, please let us know about - them there, so this reference list can be updated. If you have working, - *tested* patches to correct problems or add features, you may submit them to - the `setuptools bug tracker`_. - - .. _setuptools bug tracker: http://bugs.python.org/setuptools/ - .. _Package Compatibility Notes: http://peak.telecommunity.com/DevCenter/PackageNotes - .. _The Internal Structure of Python Eggs: http://peak.telecommunity.com/DevCenter/EggFormats - .. _The setuptools Developer's Guide: http://peak.telecommunity.com/DevCenter/setuptools - .. _The pkg_resources API reference: http://peak.telecommunity.com/DevCenter/PkgResources - .. _The EasyInstall user's guide and reference manual: http://peak.telecommunity.com/DevCenter/EasyInstall - .. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ - - - ------- - Credits - ------- - - * The original design for the ``.egg`` format and the ``pkg_resources`` API was - co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first - version of ``pkg_resources``, and supplied the OS X operating system version - compatibility algorithm. - - * Ian Bicking implemented many early "creature comfort" features of - easy_install, including support for downloading via Sourceforge and - Subversion repositories. Ian's comments on the Web-SIG about WSGI - application deployment also inspired the concept of "entry points" in eggs, - and he has given talks at PyCon and elsewhere to inform and educate the - community about eggs and setuptools. - - * Jim Fulton contributed time and effort to build automated tests of various - aspects of ``easy_install``, and supplied the doctests for the command-line - ``.exe`` wrappers on Windows. - - * Phillip J. Eby is the principal author and maintainer of setuptools, and - first proposed the idea of an importable binary distribution format for - Python application plug-ins. - - * Significant parts of the implementation of setuptools were funded by the Open - Source Applications Foundation, to provide a plug-in infrastructure for the - Chandler PIM application. In addition, many OSAF staffers (such as Mike - "Code Bear" Taylor) contributed their time and stress as guinea pigs for the - use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) - - .. 
_files: - -Keywords: CPAN PyPI distutils eggs package management -Platform: UNKNOWN -Classifier: Development Status :: 3 - Alpha -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: License :: OSI Approved :: Zope Public License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/SOURCES.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/SOURCES.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/SOURCES.txt 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -README.txt -easy_install.py -pkg_resources.py -setup.cfg -setup.py -setuptools/__init__.py -setuptools/archive_util.py -setuptools/depends.py -setuptools/dist.py -setuptools/extension.py -setuptools/package_index.py -setuptools/sandbox.py -setuptools/site-patch.py -setuptools.egg-info/PKG-INFO -setuptools.egg-info/SOURCES.txt -setuptools.egg-info/dependency_links.txt -setuptools.egg-info/entry_points.txt -setuptools.egg-info/top_level.txt -setuptools.egg-info/zip-safe -setuptools/command/__init__.py -setuptools/command/alias.py -setuptools/command/bdist_egg.py -setuptools/command/bdist_rpm.py -setuptools/command/bdist_wininst.py -setuptools/command/build_ext.py -setuptools/command/build_py.py -setuptools/command/develop.py -setuptools/command/easy_install.py -setuptools/command/egg_info.py -setuptools/command/install.py -setuptools/command/install_egg_info.py -setuptools/command/install_lib.py -setuptools/command/install_scripts.py -setuptools/command/register.py -setuptools/command/rotate.py -setuptools/command/saveopts.py -setuptools/command/scriptsetup.py -setuptools/command/sdist.py -setuptools/command/setopt.py -setuptools/command/test.py -setuptools/command/upload.py -setuptools/tests/__init__.py -setuptools/tests/test_packageindex.py -setuptools/tests/test_resources.py \ No newline at end of file diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/dependency_links.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/dependency_links.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/dependency_links.txt 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/dependency_links.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/entry_points.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/entry_points.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/entry_points.txt 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -[distutils.commands] -bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm -rotate = setuptools.command.rotate:rotate -develop = setuptools.command.develop:develop -setopt = setuptools.command.setopt:setopt -build_py = setuptools.command.build_py:build_py -scriptsetup = setuptools.command.scriptsetup:scriptsetup -saveopts = setuptools.command.saveopts:saveopts -egg_info = setuptools.command.egg_info:egg_info -register = setuptools.command.register:register 
-install_egg_info = setuptools.command.install_egg_info:install_egg_info -alias = setuptools.command.alias:alias -easy_install = setuptools.command.easy_install:easy_install -install_scripts = setuptools.command.install_scripts:install_scripts -bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst -bdist_egg = setuptools.command.bdist_egg:bdist_egg -install = setuptools.command.install:install -test = setuptools.command.test:test -install_lib = setuptools.command.install_lib:install_lib -build_ext = setuptools.command.build_ext:build_ext -sdist = setuptools.command.sdist:sdist - -[egg_info.writers] -dependency_links.txt = setuptools.command.egg_info:overwrite_arg -requires.txt = setuptools.command.egg_info:write_requirements -PKG-INFO = setuptools.command.egg_info:write_pkg_info -eager_resources.txt = setuptools.command.egg_info:overwrite_arg -top_level.txt = setuptools.command.egg_info:write_toplevel_names -namespace_packages.txt = setuptools.command.egg_info:overwrite_arg -entry_points.txt = setuptools.command.egg_info:write_entries -depends.txt = setuptools.command.egg_info:warn_depends_obsolete - -[console_scripts] -easy_install_z-2.6 = setuptools.command.easy_install:main -easy_install_z = setuptools.command.easy_install:main - -[setuptools.file_finders] -svn_cvs = setuptools.command.sdist:_default_revctrl - -[distutils.setup_keywords] -dependency_links = setuptools.dist:assert_string_list -entry_points = setuptools.dist:check_entry_points -extras_require = setuptools.dist:check_extras -test_runner = setuptools.dist:check_importable -package_data = setuptools.dist:check_package_data -install_requires = setuptools.dist:check_requirements -include_package_data = setuptools.dist:assert_bool -exclude_package_data = setuptools.dist:check_package_data -namespace_packages = setuptools.dist:check_nsp -test_suite = setuptools.dist:check_test_suite -eager_resources = setuptools.dist:assert_string_list -zip_safe = setuptools.dist:assert_bool -test_loader = setuptools.dist:check_importable -packages = setuptools.dist:check_packages -tests_require = setuptools.dist:check_requirements - -[setuptools.installation] -eggsecutable = setuptools.command.easy_install:bootstrap - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/top_level.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/top_level.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/top_level.txt 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/top_level.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -easy_install -pkg_resources -setuptools diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/zip-safe tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/zip-safe --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/EGG-INFO/zip-safe 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/EGG-INFO/zip-safe 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/easy_install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/easy_install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/easy_install.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/easy_install.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -"""Run the EasyInstall command""" - -if __name__ == '__main__': - from setuptools.command.easy_install import main - main() diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/pkg_resources.py 
tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/pkg_resources.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/pkg_resources.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/pkg_resources.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2653 +0,0 @@ -"""Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. - -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. -""" - -import sys, os, zipimport, time, re, imp - -try: - frozenset -except NameError: - from sets import ImmutableSet as frozenset - -# capture these to bypass sandboxing -from os import utime, rename, unlink, mkdir -from os import open as os_open -from os.path import isdir, split - -def _bypass_ensure_directory(name, mode=0777): - # Sandbox-bypassing version of ensure_directory() - dirname, filename = split(name) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - mkdir(dirname, mode) - - - - - - - - -_state_vars = {} - -def _declare_state(vartype, **kw): - g = globals() - for name, val in kw.iteritems(): - g[name] = val - _state_vars[name] = vartype - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.iteritems(): - state[k] = g['_sget_'+v](g[k]) - return state - -def __setstate__(state): - g = globals() - for k, v in state.iteritems(): - g['_sset_'+_state_vars[k]](k, g[k], v) - return state - -def _sget_dict(val): - return val.copy() - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - -def _sget_object(val): - return val.__getstate__() - -def _sset_object(key, ob, state): - ob.__setstate__(state) - -_sget_none = _sset_none = lambda *args: None - - - - - - -def get_supported_platform(): - """Return this platform's maximum compatible version. - - distutils.util.get_platform() normally reports the minimum version - of Mac OS X that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of Mac OS X that we are *running*. To allow usage of packages that - explicitly require a newer version of Mac OS X, we must also know the - current version of the OS. - - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. 
- """ - plat = get_build_platform(); m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) - except ValueError: - pass # not Mac OS X - return plat - - - - - - - - - - - - - - - - - - - - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - - # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', - 'get_default_cache', - - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - - # Exceptions - 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra', - 'ExtractionError', - - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', - - # filesystem utilities - 'ensure_directory', 'normalize_path', - - # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', -] -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - def __repr__(self): - return self.__class__.__name__+repr(self.args) - -class VersionConflict(ResolutionError): - """An already-installed version conflicts with the requested version""" - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - -_provider_factories = {} -PY_MAJOR = sys.version[:3] -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. 
- """ - _provider_factories[loader_type] = provider_factory - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq,Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - -def _macosx_vers(_cache=[]): - if not _cache: - from platform import mac_ver - _cache.append(mac_ver()[0].split('.')) - return _cache[0] - -def _macosx_arch(machine): - return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine) - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and Mac OS X. - """ - from distutils.util import get_platform - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macosx_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), - _macosx_arch(machine)) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -get_platform = get_build_platform # XXX backward compat - - - - - - - -def compatible_platforms(provided,required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. - """ - if provided is None or required is None or provided==required: - return True # easy case - - # Mac OS X special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macosx designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": - - #import warnings - #warnings.warn("Mac eggs should be rebuilt to " - # "use the macosx designation instead of darwin.", - # category=DeprecationWarning) - return True - return False # egg isn't macosx or legacy darwin - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): - return False - - - - # is the required OS major update >= the provided one? 
- if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - -run_main = run_script # backward compatibility - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist,basestring): dist = Requirement.parse(dist) - if isinstance(dist,Requirement): dist = get_provider(dist) - if not isinstance(dist,Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? (like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - - - - - - - - - - - - - - - - - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? 
(like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - - - - - - - - - - - - - - -class WorkingSet(object): - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - - def __contains__(self,dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - - - - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. - If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - if dist is not None and dist not in req: - raise VersionConflict(dist,req) # XXX add more info - else: - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - for dist in self: - entries = dist.get_entry_map(group) - if name is None: - for ep in entries.values(): - yield ep - elif name in entries: - yield entries[name] - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. - """ - seen = {} - for item in self.entries: - for key in self.entry_keys[item]: - if key not in seen: - seen[key]=1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set. 
If it's added, any - callbacks registered with the ``subscribe()`` method will be called. - """ - if insert: - dist.insert_on(self.entries, entry) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry,[]) - keys2 = self.entry_keys.setdefault(dist.location,[]) - if dist.key in self.by_key: - return # ignore hidden distros - - # If we have a __requires__ then we can already tell if this - # dist is unsatisfactory, in which case we won't add it. - if __requires__ is not None: - for thisreqstr in __requires__: - for thisreq in parse_requirements(thisreqstr): - if thisreq.key == dist.key: - if dist not in thisreq: - return - - - self.by_key[dist.key] = dist - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - def resolve(self, requirements, env=None, installer=None): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. - """ - - requirements = list(requirements)[::-1] # set up the stack - processed = {} # set of processed requirements - best = {} # key -> dist - to_activate = [] - - while requirements: - req = requirements.pop(0) # process dependencies breadth-first - if req in processed: - # Ignore cyclic or redundant dependencies - continue - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None: - if env is None: - env = Environment(self.entries) - dist = best[req.key] = env.best_match(req, self, installer) - if dist is None: - raise DistributionNotFound(req) # XXX put more info here - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - raise VersionConflict(dist,req) # XXX put more info here - requirements.extend(dist.requires(req.extras)[::-1]) - processed[req] = True - - return to_activate # return list of distros to activate - - def find_plugins(self, - plugin_env, full_env=None, installer=None, fallback=True - ): - """Find all activatable distributions in `plugin_env` - - Example usage:: - - distributions, errors = working_set.find_plugins( - Environment(plugin_dirlist) - ) - map(working_set.add, distributions) # add plugins+libs to sys.path - print "Couldn't load", errors # display errors - - The `plugin_env` should be an ``Environment`` instance that contains - only distributions that are in the project's "plugin directory" or - directories. The `full_env`, if supplied, should be an ``Environment`` - contains all currently-available distributions. If `full_env` is not - supplied, one is created automatically from the ``WorkingSet`` this - method is called on, which will typically mean that every directory on - ``sys.path`` will be scanned for distributions. - - `installer` is a standard installer callback as used by the - ``resolve()`` method. The `fallback` flag indicates whether we should - attempt to resolve older versions of a plugin if the newest version - cannot be resolved. 
- - This method returns a 2-tuple: (`distributions`, `error_info`), where - `distributions` is a list of the distributions found in `plugin_env` - that were loadable, along with any other distributions that are needed - to resolve their dependencies. `error_info` is a dictionary mapping - unloadable plugin distributions to an exception instance describing the - error that occurred. Usually this will be a ``DistributionNotFound`` or - ``VersionConflict`` instance. - """ - - plugin_projects = list(plugin_env) - plugin_projects.sort() # scan project names in alphabetic order - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - map(shadow_set.add, self) # put all our entries in shadow_set - - for project_name in plugin_projects: - - for dist in plugin_env[project_name]: - - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError,v: - error_info[dist] = v # save error info - if fallback: - continue # try the next older version of project - else: - break # give up on this project, keep going - - else: - map(shadow_set.add, resolvees) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - - - - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback): - """Invoke `callback` for all distributions (including existing ones)""" - if callback in self.callbacks: - return - self.callbacks.append(callback) - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.callbacks[:] - ) - - def __setstate__(self, (entries, keys, by_key, callbacks)): - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.callbacks = callbacks[:] - - -class Environment(object): - """Searchable snapshot of distributions on a search path""" - - def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'2.4'``); - it defaults to the current version. 
- - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. - """ - self._distmap = {} - self._cache = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - return (self.python is None or dist.py_version is None - or dist.py_version==self.python) \ - and compatible_platforms(dist.platform,self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self,project_name): - """Return a newest-to-oldest list of distributions for `project_name` - """ - try: - return self._cache[project_name] - except KeyError: - project_name = project_name.lower() - if project_name not in self._distmap: - return [] - - if project_name not in self._cache: - dists = self._cache[project_name] = self._distmap[project_name] - _sort_dists(dists) - - return self._cache[project_name] - - def add(self,dist): - """Add `dist` if we ``can_add()`` it and it isn't already added""" - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key,[]) - if dist not in dists: - dists.append(dist) - if dist.key in self._cache: - _sort_dists(self._cache[dist.key]) - - - def best_match(self, req, working_set, installer=None): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) - - If a suitable distribution isn't active, this method returns the - newest platform-dependent distribution in the environment that meets - the ``Requirement`` in `req`. If no suitable platform-dependent - distribution is found, then the newest platform-independent - distribution that meets the requirement is returned. (A platform- - dependent distribution will typically have code compiled or - specialized for that platform.) - - Otherwise, if `installer` is supplied, then the result of calling the - environment's ``obtain(req, installer)`` method will be returned. - """ - dist = working_set.find(req) - if dist is not None: - return dist - - # first try to find a platform-dependent dist - for dist in self[req.key]: - if dist in req and dist.platform is not None: - return dist - - # then try any other dist - for dist in self[req.key]: - if dist in req: - return dist - - return self.obtain(req, installer) # try and download/install - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. 
via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: yield key - - - - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other,Distribution): - self.add(other) - elif isinstance(other,Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -AvailableDistributions = Environment # XXX backward compatibility - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - - - -class ResourceManager: - """Manage resource extraction and packages""" - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - err = ExtractionError("""Can't extract file(s) to egg cache - -The following error occurred while trying to extract file(s) to the Python egg -cache: - - %s - -The Python egg cache directory is currently set to: - - %s - -Perhaps your account does not have write access to this directory? You can -change the cache directory by setting the PYTHON_EGG_CACHE environment -variable to point to an accessible directory. 
-""" % (old_exc, cache_path) - ) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - - - - - - - - - - - - - - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. `names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name+'-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except: - self.extraction_error() - - self.cached_files[target_path] = 1 - return target_path - - - - - - - - - - - - - - - - - - - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. - """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0555) & 07777 - os.chmod(tempname, mode) - - - - - - - - - - - - - - - - - - - - - - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. 
This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - - -def get_default_cache(): - """Determine the default cache location - - This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. - Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the - "Application Data" directory. On all other systems, it's "~/.python-eggs". - """ - try: - return os.environ['PYTHON_EGG_CACHE'] - except KeyError: - pass - - if os.name!='nt': - return os.path.expanduser('~/.python-eggs') - - app_data = 'Application Data' # XXX this may be locale-specific! - app_homes = [ - (('APPDATA',), None), # best option, should be locale-safe - (('USERPROFILE',), app_data), - (('HOMEDRIVE','HOMEPATH'), app_data), - (('HOMEPATH',), app_data), - (('HOME',), None), - (('WINDIR',), app_data), # 95/98/ME - ] - - for keys, subdir in app_homes: - dirname = '' - for key in keys: - if key in os.environ: - dirname = os.path.join(dirname, os.environ[key]) - else: - break - else: - if subdir: - dirname = os.path.join(dirname,subdir) - return os.path.join(dirname, 'Python-Eggs') - else: - raise RuntimeError( - "Please set the PYTHON_EGG_CACHE environment variable" - ) - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """Convert an arbitrary string to a standard version string - - Spaces become dots, and all other non-alphanumeric characters become - dashes, with runs of multiple dashes condensed to a single dash. - """ - version = version.replace(' ','.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. 
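Worked examples for the normalization helpers defined above (outputs follow directly from the regexps shown)::

    import pkg_resources

    print pkg_resources.safe_name("My Project?!")          # -> My-Project-
    print pkg_resources.safe_version("1.0 beta 2")         # -> 1.0.beta.2
    print pkg_resources.safe_extra("Fancy-Tests")          # -> fancy_tests
    print pkg_resources.to_filename("zope.interface-3.3")  # -> zope.interface_3.3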
- """ - return name.replace('-','_') - - - - - - - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return StringIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def has_metadata(self, name): - return self.egg_info and self._has(self._fn(self.egg_info,name)) - - def get_metadata(self, name): - if not self.egg_info: - return "" - return self._get(self._fn(self.egg_info,name)) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self,resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self,name): - return self.egg_info and self._isdir(self._fn(self.egg_info,name)) - - - def resource_listdir(self,resource_name): - return self._listdir(self._fn(self.module_path,resource_name)) - - def metadata_listdir(self,name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info,name)) - return [] - - def run_script(self,script_name,namespace): - script = 'scripts/'+script_name - if not self.has_metadata(script): - raise ResolutionError("No script named %r" % script_name) - script_text = self.get_metadata(script).replace('\r\n','\n') - script_text = script_text.replace('\r','\n') - script_filename = self._fn(self.egg_info,script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - execfile(script_filename, namespace, namespace) - else: - from linecache import cache - cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename - ) - script_code = compile(script_text,script_filename,'exec') - exec script_code in namespace, namespace - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return base - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - -register_loader_type(object, NullProvider) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self,module): - NullProvider.__init__(self,module) - self._setup_prefix() - - def _setup_prefix(self): - # we assume here that our metadata may be nested inside a "basket" - # of multiple eggs; that's why we use module_path instead of .archive - path = self.module_path - old = None - while path!=old: - if path.lower().endswith('.egg'): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 
'EGG-INFO') - self.egg_root = path - break - old = path - path, base = os.path.split(path) - - - - - - -class DefaultProvider(EggProvider): - """Provides access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self,path): - return os.path.isdir(path) - - def _listdir(self,path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - stream = open(path, 'rb') - try: - return stream.read() - finally: - stream.close() - -register_loader_type(type(None), DefaultProvider) - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - _isdir = _has = lambda self,path: False - _get = lambda self,path: '' - _listdir = lambda self,path: [] - module_path = None - - def __init__(self): - pass - -empty_provider = EmptyProvider() - - - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - - def __init__(self, module): - EggProvider.__init__(self,module) - self.zipinfo = zipimport._zip_directory_cache[self.loader.archive] - self.zip_pre = self.loader.archive+os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath,self.zip_pre) - ) - - def _parts(self,zip_path): - # Convert a zipfile subpath into an egg-relative path part list - fspath = self.zip_pre+zip_path # pseudo-fs path - if fspath.startswith(self.egg_root+os.sep): - return fspath[len(self.egg_root)+1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath,self.egg_root) - ) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - def _extract_resource(self, manager, zip_path): - - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) - return os.path.dirname(last) # return the extracted directory name - - zip_stat = self.zipinfo[zip_path] - t,d,size = zip_stat[5], zip_stat[6], zip_stat[3] - date_time = ( - (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd - (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc. 
- ) - timestamp = time.mktime(date_time) - - try: - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) - - if os.path.isfile(real_path): - stat = os.stat(real_path) - if stat.st_size==size and stat.st_mtime==timestamp: - # size and stamp match, don't bother extracting - return real_path - - outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp,timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if os.path.isfile(real_path): - stat = os.stat(real_path) - - if stat.st_size==size and stat.st_mtime==timestamp: - # size and stamp match, somebody did it just ahead of - # us, so we're done - return real_path - elif os.name=='nt': # Windows, del old file and retry - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - manager.extraction_error() # report a user-friendly error - - return real_path - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self,fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self,fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self,resource_name): - return self._zipinfo_name(self._fn(self.egg_root,resource_name)) - - def _resource_to_zip(self,resource_name): - return self._zipinfo_name(self._fn(self.module_path,resource_name)) - -register_loader_type(zipimport.zipimporter, ZipProvider) - - - - - - - - - - - - - - - - - - - - - - - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. 
- """ - - def __init__(self,path): - self.path = path - - def has_metadata(self,name): - return name=='PKG-INFO' - - def get_metadata(self,name): - if name=='PKG-INFO': - return open(self.path,'rU').read() - raise KeyError("No metadata except PKG-INFO is available") - - def get_metadata_lines(self,name): - return yield_lines(self.get_metadata(name)) - - - - - - - - - - - - - - - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir,project_name=dist_name,metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zipinfo = zipimport._zip_directory_cache[importer.archive] - self.zip_pre = importer.archive+os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -class ImpWrapper: - """PEP 302 Importer that wraps Python's "normal" import algorithm""" - - def __init__(self, path=None): - self.path = path - - def find_module(self, fullname, path=None): - subname = fullname.split(".")[-1] - if subname != fullname and self.path is None: - return None - if self.path is None: - path = None - else: - path = [self.path] - try: - file, filename, etc = imp.find_module(subname, path) - except ImportError: - return None - return ImpLoader(file, filename, etc) - - -class ImpLoader: - """PEP 302 Loader that wraps Python's "normal" import algorithm""" - - def __init__(self, file, filename, etc): - self.file = file - self.filename = filename - self.etc = etc - - def load_module(self, fullname): - try: - mod = imp.load_module(fullname, self.file, self.filename, self.etc) - finally: - if self.file: self.file.close() - # Note: we don't set __loader__ because we want the module to look - # normal; i.e. this is just a wrapper for standard import machinery - return mod - - - - -def get_importer(path_item): - """Retrieve a PEP 302 "importer" for the given path item - - If there is no importer, this returns a wrapper around the builtin import - machinery. The returned importer is only cached if it was created by a - path hook. 
- """ - try: - importer = sys.path_importer_cache[path_item] - except KeyError: - for hook in sys.path_hooks: - try: - importer = hook(path_item) - except ImportError: - pass - else: - break - else: - importer = None - - sys.path_importer_cache.setdefault(path_item,importer) - if importer is None: - try: - importer = ImpWrapper(path_item) - except ImportError: - pass - return importer - - - - - - - - - - - - - - -_declare_state('dict', _distribution_finders = {}) - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - -def find_in_zip(importer, path_item, only=False): - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - return # don't yield nested distros - for subitem in metadata.resource_listdir('/'): - if subitem.endswith('.egg'): - subpath = os.path.join(path_item, subitem) - for dist in find_in_zip(zipimport.zipimporter(subpath), subpath): - yield dist - -register_finder(zipimport.zipimporter, find_in_zip) - -def StringIO(*args, **kw): - """Thunk to load the real StringIO on demand""" - global StringIO - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - return StringIO(*args,**kw) - -def find_nothing(importer, path_item, only=False): - return () -register_finder(object,find_nothing) - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if os.path.isdir(path_item) and os.access(path_item, os.R_OK): - if path_item.lower().endswith('.egg'): - # unpacked egg - yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item,'EGG-INFO') - ) - ) - else: - # scan for .egg and .egg-info in directory - for entry in os.listdir(path_item): - lower = entry.lower() - if lower.endswith('.egg-info'): - fullpath = os.path.join(path_item, entry) - if os.path.isdir(fullpath): - # egg-info directory, allow getting metadata - metadata = PathMetadata(path_item, fullpath) - else: - metadata = FileMetadata(fullpath) - yield Distribution.from_location( - path_item,entry,metadata,precedence=DEVELOP_DIST - ) - elif not only and lower.endswith('.egg'): - for dist in find_distributions(os.path.join(path_item, entry)): - yield dist - elif not only and lower.endswith('.egg-link'): - for line in file(os.path.join(path_item, entry)): - if not line.strip(): continue - for item in find_distributions(os.path.join(path_item,line.rstrip())): - yield item - break -register_finder(ImpWrapper, find_on_path) - -_declare_state('dict', _namespace_handlers = {}) -_declare_state('dict', _namespace_packages = {}) - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` 
is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer,path_entry,moduleName,module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. - """ - _namespace_handlers[importer_type] = namespace_handler - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - importer = get_importer(path_item) - if importer is None: - return None - loader = importer.find_module(packageName) - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = imp.new_module(packageName) - module.__path__ = []; _set_parent_ns(packageName) - elif not hasattr(module,'__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer,path_item,packageName,module) - if subpath is not None: - path = module.__path__; path.append(subpath) - loader.load_module(packageName); module.__path__ = path - return subpath - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path, parent = sys.path, None - if '.' in packageName: - parent = '.'.join(packageName.split('.')[:-1]) - declare_namespace(parent) - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError: - raise TypeError("Not a package:", parent) - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent,[]).append(packageName) - _namespace_packages.setdefault(packageName,[]) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - imp.release_lock() - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - imp.acquire_lock() - try: - for package in _namespace_packages.get(parent,()): - subpath = _handle_ns(package, path_item) - if subpath: fixup_namespace_packages(subpath,package) - finally: - imp.release_lock() - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = os.path.join(path_item, packageName.split('.')[-1]) - normalized = _normalize_cached(subpath) - for item in module.__path__: - if _normalize_cached(item)==normalized: - break - else: - # Only return the path if it's not already there - return subpath - -register_namespace_handler(ImpWrapper,file_ns_handler) -register_namespace_handler(zipimport.zipimporter,file_ns_handler) - - -def null_ns_handler(importer, path_item, packageName, module): - return None - -register_namespace_handler(object,null_ns_handler) - - -def normalize_path(filename): - """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(filename)) - -def _normalize_cached(filename,_cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = 
result = normalize_path(filename) - return result - -def _set_parent_ns(packageName): - parts = packageName.split('.') - name = parts.pop() - if parts: - parent = '.'.join(parts) - setattr(sys.modules[parent], name, sys.modules[packageName]) - - -def yield_lines(strs): - """Yield non-empty/non-comment lines of a ``basestring`` or sequence""" - if isinstance(strs,basestring): - for s in strs.splitlines(): - s = s.strip() - if s and not s.startswith('#'): # skip blank lines/comments - yield s - else: - for ss in strs: - for s in yield_lines(ss): - yield s - -LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment -CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation -DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra -VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info -COMMA = re.compile(r"\s*,").match # comma between items -OBRACKET = re.compile(r"\s*\[").match -CBRACKET = re.compile(r"\s*\]").match -MODULE = re.compile(r"\w+(\.\w+)*$").match -EGG_NAME = re.compile( - r"(?P<name>[^-]+)" - r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?", - re.VERBOSE | re.IGNORECASE -).match - -component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) -replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get - -def _parse_version_parts(s): - for part in component_re.split(s): - part = replace(part,part) - if not part or part=='.': - continue - if part[:1] in '0123456789': - yield part.zfill(8) # pad for numeric comparison - else: - yield '*'+part - - yield '*final' # ensure that alpha/beta/candidate are before final - -def parse_version(s): - """Convert a version string to a chronologically-sortable key - - This is a rough cross between distutils' StrictVersion and LooseVersion; - if you give it versions that would work with StrictVersion, then it behaves - the same; otherwise it acts like a slightly-smarter LooseVersion. It is - *possible* to create pathological version coding schemes that will fool - this parser, but they should be very rare in practice. - - The returned value will be a tuple of strings. Numeric portions of the - version are padded to 8 digits so they will compare numerically, but - without relying on how numbers compare relative to strings. Dots are - dropped, but dashes are retained. Trailing zeros between alpha segments - or dashes are suppressed, so that e.g. "2.4.0" is considered the same as - "2.4". Alphanumeric parts are lower-cased. - - The algorithm assumes that strings like "-" and any alpha string that - alphabetically follows "final" represents a "patch level". So, "2.4-1" - is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is - considered newer than "2.4-1", which in turn is newer than "2.4". - - Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that - come before "final" alphabetically) are assumed to be pre-release versions, - so that the version "2.4" is considered newer than "2.4a1". - - Finally, to handle miscellaneous cases, the strings "pre", "preview", and - "rc" are treated as if they were "c", i.e. as though they were release - candidates, and therefore are not as new as a version string that does not - contain them, and "dev" is replaced with an '@' so that it sorts lower - than any other pre-release tag. 
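The ordering rules spelled out in this docstring can be spot-checked directly; every comparison below restates a claim made above::

    from pkg_resources import parse_version

    assert parse_version("2.4.1") > parse_version("2.4-1") > parse_version("2.4")
    assert parse_version("2.4") > parse_version("2.4a1")      # pre-release sorts earlier
    assert parse_version("2.4.0") == parse_version("2.4")     # trailing zeros dropped
    assert parse_version("2.4c1") == parse_version("2.4rc1")  # 'rc' treated as 'c'
    assert parse_version("2.4.dev1") < parse_version("2.4a1") # 'dev' sorts lowest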
- """ - parts = [] - for part in _parse_version_parts(s.lower()): - if part.startswith('*'): - if part<'*final': # remove '-' before a prerelease tag - while parts and parts[-1]=='*final-': parts.pop() - # remove trailing zeros from each series of numeric parts - while parts and parts[-1]=='00000000': - parts.pop() - parts.append(part) - return tuple(parts) - -class EntryPoint(object): - """Object representing an advertised importable object""" - - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): - if not MODULE(module_name): - raise ValueError("Invalid module name", module_name) - self.name = name - self.module_name = module_name - self.attrs = tuple(attrs) - self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras - self.dist = dist - - def __str__(self): - s = "%s = %s" % (self.name, self.module_name) - if self.attrs: - s += ':' + '.'.join(self.attrs) - if self.extras: - s += ' [%s]' % ','.join(self.extras) - return s - - def __repr__(self): - return "EntryPoint.parse(%r)" % str(self) - - def load(self, require=True, env=None, installer=None): - if require: self.require(env, installer) - entry = __import__(self.module_name, globals(),globals(), ['__name__']) - for attr in self.attrs: - try: - entry = getattr(entry,attr) - except AttributeError: - raise ImportError("%r has no %r attribute" % (entry,attr)) - return entry - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - map(working_set.add, - working_set.resolve(self.dist.requires(self.extras),env,installer)) - - - - #@classmethod - def parse(cls, src, dist=None): - """Parse a single entry point from string `src` - - Entry point syntax follows the form:: - - name = some.module:some.attr [extra1,extra2] - - The entry name and module name are required, but the ``:attrs`` and - ``[extras]`` parts are optional - """ - try: - attrs = extras = () - name,value = src.split('=',1) - if '[' in value: - value,extras = value.split('[',1) - req = Requirement.parse("x["+extras) - if req.specs: raise ValueError - extras = req.extras - if ':' in value: - value,attrs = value.split(':',1) - if not MODULE(attrs.rstrip()): - raise ValueError - attrs = attrs.rstrip().split('.') - except ValueError: - raise ValueError( - "EntryPoint must be in 'name=module:attrs [extras]' format", - src - ) - else: - return cls(name.strip(), value.strip(), attrs, extras, dist) - - parse = classmethod(parse) - - - - - - - - - #@classmethod - def parse_group(cls, group, lines, dist=None): - """Parse an entry point group""" - if not MODULE(group): - raise ValueError("Invalid group name", group) - this = {} - for line in yield_lines(lines): - ep = cls.parse(line, dist) - if ep.name in this: - raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name]=ep - return this - - parse_group = classmethod(parse_group) - - #@classmethod - def parse_map(cls, data, dist=None): - """Parse a map of entry point groups""" - if isinstance(data,dict): - data = data.items() - else: - data = split_sections(data) - maps = {} - for group, lines in data: - if group is None: - if not lines: - continue - raise ValueError("Entry points must be listed in groups") - group = group.strip() - if group in maps: - raise ValueError("Duplicate group name", group) - maps[group] = cls.parse_group(group, lines, dist) - return maps - - parse_map = classmethod(parse_map) - - - - - - -class Distribution(object): - """Wrap an actual or potential sys.path entry 
w/metadata""" - def __init__(self, - location=None, metadata=None, project_name=None, version=None, - py_version=PY_MAJOR, platform=None, precedence = EGG_DIST - ): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - #@classmethod - def from_location(cls,location,basename,metadata=None,**kw): - project_name, version, py_version, platform = [None]*4 - basename, ext = os.path.splitext(basename) - if ext.lower() in (".egg",".egg-info"): - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name','ver','pyver','plat' - ) - return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw - ) - from_location = classmethod(from_location) - - hashcmp = property( - lambda self: ( - getattr(self,'parsed_version',()), self.precedence, self.key, - -len(self.location or ''), self.location, self.py_version, - self.platform - ) - ) - def __cmp__(self, other): return cmp(self.hashcmp, other) - def __hash__(self): return hash(self.hashcmp) - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - #@property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - key = property(key) - - #@property - def parsed_version(self): - try: - return self._parsed_version - except AttributeError: - self._parsed_version = pv = parse_version(self.version) - return pv - - parsed_version = property(parsed_version) - - #@property - def version(self): - try: - return self._version - except AttributeError: - for line in self._get_metadata('PKG-INFO'): - if line.lower().startswith('version:'): - self._version = safe_version(line.split(':',1)[1].strip()) - return self._version - else: - raise ValueError( - "Missing 'Version:' header and/or PKG-INFO file", self - ) - version = property(version) - - - - - #@property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - dm = self.__dep_map = {None: []} - for name in 'requires.txt', 'depends.txt': - for extra,reqs in split_sections(self._get_metadata(name)): - if extra: extra = safe_extra(extra) - dm.setdefault(extra,[]).extend(parse_requirements(reqs)) - return dm - _dep_map = property(_dep_map) - - def requires(self,extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None,())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) - return deps - - def _get_metadata(self,name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def activate(self,path=None): - """Ensure distribution is importable on `path` (default=sys.path)""" - if path is None: path = sys.path - self.insert_on(path) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - 
filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR - ) - - if self.platform: - filename += '-'+self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self,self.location) - else: - return str(self) - - def __str__(self): - try: version = getattr(self,'version',None) - except ValueError: version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name,version) - - def __getattr__(self,attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError,attr - return getattr(self._provider, attr) - - #@classmethod - def from_filename(cls,filename,metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw - ) - from_filename = classmethod(from_filename) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - return Requirement.parse('%s==%s' % (self.project_name, self.version)) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = self.get_entry_info(group,name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group,name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group,{}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - - - - - - - - - - - - - - - - - - - def insert_on(self, path, loc = None): - """Insert self.location in path before its nearest parent directory""" - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath= [(p and _normalize_cached(p) or p) for p in path] - - bp = None - for p, item in enumerate(npath): - if item==nloc: - break - elif item==bdir and self.precedence==EGG_DIST: - # if it's an .egg, give it precedence over its directory - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while 1: - try: - np = npath.index(nloc, p+1) - except ValueError: - break - else: - del npath[np], path[np] - p = np # ha! 
- - return - - - def check_version_conflict(self): - if self.key=='setuptools': - return # ignore the inevitable setuptools self-conflicts :( - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages - ): - continue - - fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or fn.startswith(loc)): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for "+repr(self)) - return False - return True - - def clone(self,**kw): - """Copy this distribution, substituting in any changed keyword args""" - for attr in ( - 'project_name', 'version', 'py_version', 'platform', 'location', - 'precedence' - ): - kw.setdefault(attr, getattr(self,attr,None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - - - - #@property - def extras(self): - return [dep for dep in self._dep_map if dep] - extras = property(extras) - - -def issue_warning(*args,**kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - from warnings import warn - warn(stacklevel = level+1, *args, **kw) - - - - - - - - - - - - - - - - - - - - - - - -def parse_requirements(strs): - """Yield ``Requirement`` objects for each specification in `strs` - - `strs` must be an instance of ``basestring``, or a (possibly-nested) - iterable thereof. 
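The grammar accepted below allows comma-separated version specs and bracketed extras; for example (project names hypothetical)::

    import pkg_resources

    reqs = "Twisted>=2.4,<13.0\nfoo[bar]==1.0"
    for req in pkg_resources.parse_requirements(reqs):
        print req.project_name, req.extras, req.specs
    # Twisted () [('>=', '2.4'), ('<', '13.0')]
    # foo ('bar',) [('==', '1.0')]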
- """ - # create a steppable iterator, so we can handle \-continuations - lines = iter(yield_lines(strs)) - - def scan_list(ITEM,TERMINATOR,line,p,groups,item_name): - - items = [] - - while not TERMINATOR(line,p): - if CONTINUE(line,p): - try: - line = lines.next(); p = 0 - except StopIteration: - raise ValueError( - "\\ must not appear on the last nonblank line" - ) - - match = ITEM(line,p) - if not match: - raise ValueError("Expected "+item_name+" in",line,"at",line[p:]) - - items.append(match.group(*groups)) - p = match.end() - - match = COMMA(line,p) - if match: - p = match.end() # skip the comma - elif not TERMINATOR(line,p): - raise ValueError( - "Expected ',' or end-of-list in",line,"at",line[p:] - ) - - match = TERMINATOR(line,p) - if match: p = match.end() # skip the terminator, if any - return line, p, items - - for line in lines: - match = DISTRO(line) - if not match: - raise ValueError("Missing distribution spec", line) - project_name = match.group(1) - p = match.end() - extras = [] - - match = OBRACKET(line,p) - if match: - p = match.end() - line, p, extras = scan_list( - DISTRO, CBRACKET, line, p, (1,), "'extra' name" - ) - - line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec") - specs = [(op,safe_version(val)) for op,val in specs] - yield Requirement(project_name, specs, extras) - - -def _sort_dists(dists): - tmp = [(dist.hashcmp,dist) for dist in dists] - tmp.sort() - dists[::-1] = [d for hc,d in tmp] - - - - - - - - - - - - - - - - - -class Requirement: - def __init__(self, project_name, specs, extras): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - self.unsafe_name, project_name = project_name, safe_name(project_name) - self.project_name, self.key = project_name, project_name.lower() - index = [(parse_version(v),state_machine[op],op,v) for op,v in specs] - index.sort() - self.specs = [(op,ver) for parsed,trans,op,ver in index] - self.index, self.extras = index, tuple(map(safe_extra,extras)) - self.hashCmp = ( - self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]), - frozenset(self.extras) - ) - self.__hash = hash(self.hashCmp) - - def __str__(self): - specs = ','.join([''.join(s) for s in self.specs]) - extras = ','.join(self.extras) - if extras: extras = '[%s]' % extras - return '%s%s%s' % (self.project_name, extras, specs) - - def __eq__(self,other): - return isinstance(other,Requirement) and self.hashCmp==other.hashCmp - - def __contains__(self,item): - if isinstance(item,Distribution): - if item.key != self.key: return False - if self.index: item = item.parsed_version # only get if we need it - elif isinstance(item,basestring): - item = parse_version(item) - last = None - for parsed,trans,op,ver in self.index: - action = trans[cmp(item,parsed)] - if action=='F': return False - elif action=='T': return True - elif action=='+': last = True - elif action=='-' or last is None: last = False - if last is None: last = True # no rules encountered - return last - - - def __hash__(self): - return self.__hash - - def __repr__(self): return "Requirement.parse(%r)" % str(self) - - #@staticmethod - def parse(s): - reqs = list(parse_requirements(s)) - if reqs: - if len(reqs)==1: - return reqs[0] - raise ValueError("Expected only one requirement", s) - raise ValueError("No requirements found", s) - - parse = staticmethod(parse) - -state_machine = { - # =>< - '<' : '--T', - '<=': 'T-T', - '>' : 'F+F', - '>=': 'T+F', - '==': 'T..', - '!=': 'F++', -} - - -def _get_mro(cls): - """Get an mro for a type or classic class""" - if not 
isinstance(cls,type): - class cls(cls,object): pass - return cls.__mro__[1:] - return cls.__mro__ - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - for t in _get_mro(getattr(ob, '__class__', type(ob))): - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def split_sections(s): - """Split a string or iterable thereof into (section,content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. - """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - -def _mkstemp(*args,**kw): - from tempfile import mkstemp - old_open = os.open - try: - os.open = os_open # temporarily bypass sandboxing - return mkstemp(*args,**kw) - finally: - os.open = old_open # and then put it back - - -# Set up global resource manager (deliberately not state-saved) -_manager = ResourceManager() -def _initialize(g): - for name in dir(_manager): - if not name.startswith('_'): - g[name] = getattr(_manager, name) -_initialize(globals()) - -# Prepare the master working set and make the ``require()`` API available -__requires__ = None -_declare_state('object', working_set = WorkingSet()) -try: - # Does the main program list any requirements? - from __main__ import __requires__ -except ImportError: - pass # No: just use the default working set based on sys.path -else: - # Yes: ensure the requirements are met, by prefixing sys.path if necessary - try: - working_set.require(__requires__) - except (VersionConflict, DistributionNotFound): # try it without defaults already on sys.path - working_set = WorkingSet([]) # by starting with an empty path - try: - for dist in working_set.resolve( - parse_requirements(__requires__), Environment() - ): - working_set.add(dist) - except DistributionNotFound: - pass - for entry in sys.path: # add any missing entries from sys.path - if entry not in working_set.entries: - working_set.add_entry(entry) - sys.path[:] = working_set.entries # then copy back to sys.path - -require = working_set.require -iter_entry_points = working_set.iter_entry_points -add_activation_listener = working_set.subscribe -run_script = working_set.run_script -run_main = run_script # backward compatibility -# Activate all distributions already on sys.path, and ensure that -# all distributions added to the working set in the future (e.g. by -# calling ``require()``) will get activated as well. 
-add_activation_listener(lambda dist: dist.activate()) -working_set.entries=[]; map(working_set.add_entry,sys.path) # match order - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/__init__.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/__init__.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -"""Extensions to the 'distutils' for large or complex distributions""" -from setuptools.extension import Extension, Library -from setuptools.dist import Distribution, Feature, _get_unpatched -import distutils.core, setuptools.command -from setuptools.depends import Require -from distutils.core import Command as _Command -from distutils.util import convert_path -import os.path -import os -import sys - -__version__ = '0.6c16dev3' -__all__ = [ - 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', - 'find_packages' -] - -bootstrap_install_from = None - -def find_packages(where='.', exclude=()): - """Return a list all Python packages found within directory 'where' - - 'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it - will be converted to the appropriate local path syntax. 'exclude' is a - sequence of package names to exclude; '*' can be used as a wildcard in the - names, such that 'foo.*' will exclude all subpackages of 'foo' (but not - 'foo' itself). - """ - out = [] - stack=[(convert_path(where), '')] - while stack: - where,prefix = stack.pop(0) - for name in os.listdir(where): - fn = os.path.join(where,name) - if ('.' not in name and os.path.isdir(fn) and - os.path.isfile(os.path.join(fn,'__init__.py')) - ): - out.append(prefix+name); stack.append((fn,prefix+name+'.')) - for pat in list(exclude)+['ez_setup']: - from fnmatch import fnmatchcase - out = [item for item in out if not fnmatchcase(item,pat)] - return out - -setup = distutils.core.setup - -_Command = _get_unpatched(_Command) - -class Command(_Command): - __doc__ = _Command.__doc__ - - command_consumes_arguments = False - - def __init__(self, dist, **kw): - # Add support for keyword arguments - _Command.__init__(self,dist) - for k,v in kw.items(): - setattr(self,k,v) - - def reinitialize_command(self, command, reinit_subcommands=0, **kw): - cmd = _Command.reinitialize_command(self, command, reinit_subcommands) - for k,v in kw.items(): - setattr(cmd,k,v) # update command with keywords - return cmd - -import distutils.core -distutils.core.Command = Command # we can't patch distutils.cmd, alas - -def findall(dir = os.curdir): - """Find all files under 'dir' and return the list of full filenames - (relative to 'dir'). - """ - all_files = [] - for base, dirs, files in os.walk(dir): - if base==os.curdir or base.startswith(os.curdir+os.sep): - base = base[2:] - if base: - files = [os.path.join(base, f) for f in files] - all_files.extend(filter(os.path.isfile, files)) - return all_files - -import distutils.filelist -distutils.filelist.findall = findall # fix findall bug in distutils. - - -# sys.dont_write_bytecode was introduced in Python 2.6. 
-if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or - (not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))): - _dont_write_bytecode = True -else: - _dont_write_bytecode = False diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/archive_util.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/archive_util.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/archive_util.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/archive_util.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,205 +0,0 @@ -"""Utilities for extracting common archive formats""" - - -__all__ = [ - "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", - "UnrecognizedFormat", "extraction_drivers", "unpack_directory", -] - -import zipfile, tarfile, os, shutil -from pkg_resources import ensure_directory -from distutils.errors import DistutilsError - -class UnrecognizedFormat(DistutilsError): - """Couldn't recognize the archive type""" - -def default_filter(src,dst): - """The default progress/filter callback; returns True for all files""" - return dst - - - - - - - - - - - - - - - - - - - - - - - -def unpack_archive(filename, extract_dir, progress_filter=default_filter, - drivers=None -): - """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` - - `progress_filter` is a function taking two arguments: a source path - internal to the archive ('/'-separated), and a filesystem path where it - will be extracted. The callback must return the desired extract path - (which may be the same as the one passed in), or else ``None`` to skip - that file or directory. The callback can thus be used to report on the - progress of the extraction, as well as to filter the items extracted or - alter their extraction paths. - - `drivers`, if supplied, must be a non-empty sequence of functions with the - same signature as this function (minus the `drivers` argument), that raise - ``UnrecognizedFormat`` if they do not support extracting the designated - archive type. The `drivers` are tried in sequence until one is found that - does not raise an error, or until all are exhausted (in which case - ``UnrecognizedFormat`` is raised). If you do not supply a sequence of - drivers, the module's ``extraction_drivers`` constant will be used, which - means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that - order. 
- """ - for driver in drivers or extraction_drivers: - try: - driver(filename, extract_dir, progress_filter) - except UnrecognizedFormat: - continue - else: - return - else: - raise UnrecognizedFormat( - "Not a recognized archive type: %s" % filename - ) - - - - - - - -def unpack_directory(filename, extract_dir, progress_filter=default_filter): - """"Unpack" a directory, using the same interface as for archives - - Raises ``UnrecognizedFormat`` if `filename` is not a directory - """ - if not os.path.isdir(filename): - raise UnrecognizedFormat("%s is not a directory" % (filename,)) - - paths = {filename:('',extract_dir)} - for base, dirs, files in os.walk(filename): - src,dst = paths[base] - for d in dirs: - paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d) - for f in files: - name = src+f - target = os.path.join(dst,f) - target = progress_filter(src+f, target) - if not target: - continue # skip non-files - ensure_directory(target) - f = os.path.join(base,f) - shutil.copyfile(f, target) - shutil.copystat(f, target) - - - - - - - - - - - - - - - - - - -def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): - """Unpack zip `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined - by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. - """ - - if not zipfile.is_zipfile(filename): - raise UnrecognizedFormat("%s is not a zip file" % (filename,)) - - z = zipfile.ZipFile(filename) - try: - for info in z.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' in name: - continue - - target = os.path.join(extract_dir, *name.split('/')) - target = progress_filter(name, target) - if not target: - continue - if name.endswith('/'): - # directory - ensure_directory(target) - else: - # file - ensure_directory(target) - data = z.read(info.filename) - f = open(target,'wb') - try: - f.write(data) - finally: - f.close() - del data - finally: - z.close() - - -def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined - by ``tarfile.open()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. - """ - - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise UnrecognizedFormat( - "%s is not a compressed or uncompressed tar file" % (filename,) - ) - - try: - tarobj.chown = lambda *args: None # don't do any chowning! - for member in tarobj: - if member.isfile() or member.isdir(): - name = member.name - # don't extract absolute paths or ones with .. in them - if not name.startswith('/') and '..' 
not in name: - dst = os.path.join(extract_dir, *name.split('/')) - dst = progress_filter(name, dst) - if dst: - if dst.endswith(os.sep): - dst = dst[:-1] - try: - tarobj._extract_member(member,dst) # XXX Ugh - except tarfile.ExtractError: - pass # chown/chmod/mkfifo/mknode/makedev failed - return True - finally: - tarobj.close() - - - - -extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/__init__.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/__init__.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -__all__ = [ - 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', - 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', - 'sdist', 'setopt', 'test', 'upload', 'install_egg_info', 'install_scripts', - 'register', 'bdist_wininst', 'scriptsetup', -] - -import sys -if sys.version>='2.5': - # In Python 2.5 and above, distutils includes its own upload command - __all__.remove('upload') - - -from distutils.command.bdist import bdist - -if 'egg' not in bdist.format_commands: - bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") - bdist.format_commands.append('egg') - -del bdist, sys diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/alias.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/alias.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/alias.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/alias.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -import distutils, os -from setuptools import Command -from distutils.util import convert_path -from distutils import log -from distutils.errors import * -from setuptools.command.setopt import edit_config, option_base, config_file - -def shquote(arg): - """Quote an argument for later parsing by shlex.split()""" - for c in '"', "'", "\\", "#": - if c in arg: return repr(arg) - if arg.split()!=[arg]: - return repr(arg) - return arg - - -class alias(option_base): - """Define a shortcut that invokes one or more commands""" - - description = "define a shortcut to invoke one or more commands" - command_consumes_arguments = True - - user_options = [ - ('remove', 'r', 'remove (unset) the alias'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.args = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.remove and len(self.args)!=1: - raise DistutilsOptionError( - "Must specify exactly one argument (the alias name) when " - "using --remove" - ) - - def run(self): - aliases = self.distribution.get_option_dict('aliases') - - if not self.args: - print "Command Aliases" - print "---------------" - for alias in aliases: - print "setup.py alias", format_alias(alias, aliases) - return - - elif len(self.args)==1: - alias, = self.args - if self.remove: - command = None - elif alias in aliases: - print "setup.py alias", format_alias(alias, aliases) - return - else: - print "No alias definition found for %r" % alias - return - else: - alias = self.args[0] - command = ' '.join(map(shquote,self.args[1:])) - - 
edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run) - - -def format_alias(name, aliases): - source, command = aliases[name] - if source == config_file('global'): - source = '--global-config ' - elif source == config_file('user'): - source = '--user-config ' - elif source == config_file('local'): - source = '' - else: - source = '--filename=%r' % source - return source+name+' '+command diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_egg.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_egg.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_egg.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_egg.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,533 +0,0 @@ -"""setuptools.command.bdist_egg - -Build .egg distributions""" - -# This module should be kept compatible with Python 2.3 -import sys, os, marshal -from setuptools import Command -from distutils.dir_util import remove_tree, mkpath -from distutils.sysconfig import get_python_version, get_python_lib -from distutils import log -from distutils.errors import DistutilsSetupError -from pkg_resources import get_build_platform, Distribution, ensure_directory -from pkg_resources import EntryPoint -from types import CodeType -from setuptools.extension import Library - -def strip_module(filename): - if '.' in filename: - filename = os.path.splitext(filename)[0] - if filename.endswith('module'): - filename = filename[:-6] - return filename - -def write_stub(resource, pyfile): - f = open(pyfile,'w') - f.write('\n'.join([ - "def __bootstrap__():", - " global __bootstrap__, __loader__, __file__", - " import sys, pkg_resources, imp", - " __file__ = pkg_resources.resource_filename(__name__,%r)" - % resource, - " __loader__ = None; del __bootstrap__, __loader__", - " imp.load_dynamic(__name__,__file__)", - "__bootstrap__()", - "" # terminal \n - ])) - f.close() - -# stub __init__.py for packages distributed without one -NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)' - -class bdist_egg(Command): - - description = "create an \"egg\" distribution" - - user_options = [ - ('bdist-dir=', 'b', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_build_platform()), - ('exclude-source-files', None, - "remove all .py files from the generated egg"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ] - - boolean_options = [ - 'keep-temp', 'skip-build', 'exclude-source-files' - ] - - - - - - - - - - - - - - - - - - def initialize_options (self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = 0 - self.egg_output = None - self.exclude_source_files = None - - - def finalize_options(self): - ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") - self.egg_info = ei_cmd.egg_info - - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'egg') - - if self.plat_name is None: - self.plat_name = get_build_platform() - - self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) - - if self.egg_output is None: - - # Compute filename of 
the output egg - basename = Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version, - get_python_version(), - self.distribution.has_ext_modules() and self.plat_name - ).egg_name() - - self.egg_output = os.path.join(self.dist_dir, basename+'.egg') - - - - - - - - - def do_install_data(self): - # Hack for packages that install data to install's --install-lib - self.get_finalized_command('install').install_lib = self.bdist_dir - - site_packages = os.path.normcase(os.path.realpath(get_python_lib())) - old, self.distribution.data_files = self.distribution.data_files,[] - - for item in old: - if isinstance(item,tuple) and len(item)==2: - if os.path.isabs(item[0]): - realpath = os.path.realpath(item[0]) - normalized = os.path.normcase(realpath) - if normalized==site_packages or normalized.startswith( - site_packages+os.sep - ): - item = realpath[len(site_packages)+1:], item[1] - # XXX else: raise ??? - self.distribution.data_files.append(item) - - try: - log.info("installing package data to %s" % self.bdist_dir) - self.call_command('install_data', force=0, root=None) - finally: - self.distribution.data_files = old - - - def get_outputs(self): - return [self.egg_output] - - - def call_command(self,cmdname,**kw): - """Invoke reinitialized command `cmdname` with keyword args""" - for dirname in INSTALL_DIRECTORY_ATTRS: - kw.setdefault(dirname,self.bdist_dir) - kw.setdefault('skip_build',self.skip_build) - kw.setdefault('dry_run', self.dry_run) - cmd = self.reinitialize_command(cmdname, **kw) - self.run_command(cmdname) - return cmd - - - def run(self): - # Generate metadata first - self.run_command("egg_info") - # We run install_lib before install_data, because some data hacks - # pull their data path from the install_lib command. - log.info("installing library code to %s" % self.bdist_dir) - instcmd = self.get_finalized_command('install') - old_root = instcmd.root; instcmd.root = None - if self.distribution.has_c_libraries() and not self.skip_build: - self.run_command('build_clib') - cmd = self.call_command('install_lib', warn_dir=0) - instcmd.root = old_root - - all_outputs, ext_outputs = self.get_ext_outputs() - self.stubs = [] - to_compile = [] - for (p,ext_name) in enumerate(ext_outputs): - filename,ext = os.path.splitext(ext_name) - pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py') - self.stubs.append(pyfile) - log.info("creating stub loader for %s" % ext_name) - if not self.dry_run: - write_stub(os.path.basename(ext_name), pyfile) - to_compile.append(pyfile) - ext_outputs[p] = ext_name.replace(os.sep,'/') - - to_compile.extend(self.make_init_files()) - if to_compile: - cmd.byte_compile(to_compile) - if self.distribution.data_files: - self.do_install_data() - - # Make the EGG-INFO directory - archive_root = self.bdist_dir - egg_info = os.path.join(archive_root,'EGG-INFO') - self.mkpath(egg_info) - if self.distribution.scripts: - script_dir = os.path.join(egg_info, 'scripts') - log.info("installing scripts to %s" % script_dir) - self.call_command('install_scripts',install_dir=script_dir,no_ep=1) - - self.copy_metadata_to(egg_info) - native_libs = os.path.join(egg_info, "native_libs.txt") - if all_outputs: - log.info("writing %s" % native_libs) - if not self.dry_run: - ensure_directory(native_libs) - libs_file = open(native_libs, 'wt') - libs_file.write('\n'.join(all_outputs)) - libs_file.write('\n') - libs_file.close() - elif os.path.isfile(native_libs): - log.info("removing %s" % native_libs) - if not self.dry_run: - os.unlink(native_libs) - - write_safety_flag( - 
os.path.join(archive_root,'EGG-INFO'), self.zip_safe() - ) - - if os.path.exists(os.path.join(self.egg_info,'depends.txt')): - log.warn( - "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." - ) - - if self.exclude_source_files: - self.zap_pyfiles() - - # Make the archive - make_zipfile(self.egg_output, archive_root, verbose=self.verbose, - dry_run=self.dry_run, mode=self.gen_header()) - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - # Add to 'Distribution.dist_files' so that the "upload" command works - getattr(self.distribution,'dist_files',[]).append( - ('bdist_egg',get_python_version(),self.egg_output)) - - - - - def zap_pyfiles(self): - log.info("Removing .py files from temporary directory") - for base,dirs,files in walk_egg(self.bdist_dir): - for name in files: - if name.endswith('.py'): - path = os.path.join(base,name) - log.debug("Deleting %s", path) - os.unlink(path) - - def zip_safe(self): - safe = getattr(self.distribution,'zip_safe',None) - if safe is not None: - return safe - log.warn("zip_safe flag not set; analyzing archive contents...") - return analyze_egg(self.bdist_dir, self.stubs) - - def make_init_files(self): - """Create missing package __init__ files""" - init_files = [] - for base,dirs,files in walk_egg(self.bdist_dir): - if base==self.bdist_dir: - # don't put an __init__ in the root - continue - for name in files: - if name.endswith('.py'): - if '__init__.py' not in files: - pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.') - if self.distribution.has_contents_for(pkg): - log.warn("Creating missing __init__.py for %s",pkg) - filename = os.path.join(base,'__init__.py') - if not self.dry_run: - f = open(filename,'w'); f.write(NS_PKG_STUB) - f.close() - init_files.append(filename) - break - else: - # not a package, don't traverse to subdirectories - dirs[:] = [] - - return init_files - - def gen_header(self): - epm = EntryPoint.parse_map(self.distribution.entry_points or '') - ep = epm.get('setuptools.installation',{}).get('eggsecutable') - if ep is None: - return 'w' # not an eggsecutable, do it the usual way. 
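[Editor's note] `gen_header()` only emits the self-executing shell prologue when the distribution declares an `eggsecutable` entry point in the `setuptools.installation` group, as checked just above. A minimal sketch of the corresponding setup.py declaration, with hypothetical project and function names:

    from setuptools import setup

    setup(
        name='mypkg',
        version='1.0',
        packages=['mypkg'],
        # With this entry point, bdist_egg prepends the shell header built
        # below, so the resulting .egg can be executed directly
        # (e.g. "sh mypkg-1.0-py2.x.egg"). Per the checks that follow, the
        # target must be a function ("module:func"), not a bare module, and
        # may not declare extras.
        entry_points={
            'setuptools.installation': [
                'eggsecutable = mypkg.main:run',
            ],
        },
    )
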
- - if not ep.attrs or ep.extras: - raise DistutilsSetupError( - "eggsecutable entry point (%r) cannot have 'extras' " - "or refer to a module" % (ep,) - ) - - pyver = sys.version[:3] - pkg = ep.module_name - full = '.'.join(ep.attrs) - base = ep.attrs[0] - basename = os.path.basename(self.egg_output) - - header = ( - "#!/bin/sh\n" - 'if [ `basename $0` = "%(basename)s" ]\n' - 'then exec python%(pyver)s -c "' - "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " - "from %(pkg)s import %(base)s; sys.exit(%(full)s())" - '" "$@"\n' - 'else\n' - ' echo $0 is not the correct name for this egg file.\n' - ' echo Please rename it back to %(basename)s and try again.\n' - ' exec false\n' - 'fi\n' - - ) % locals() - - if not self.dry_run: - mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) - f = open(self.egg_output, 'w') - f.write(header) - f.close() - return 'a' - - - def copy_metadata_to(self, target_dir): - prefix = os.path.join(self.egg_info,'') - for path in self.ei_cmd.filelist.files: - if path.startswith(prefix): - target = os.path.join(target_dir, path[len(prefix):]) - ensure_directory(target) - self.copy_file(path, target) - - def get_ext_outputs(self): - """Get a list of relative paths to C extensions in the output distro""" - - all_outputs = [] - ext_outputs = [] - - paths = {self.bdist_dir:''} - for base, dirs, files in os.walk(self.bdist_dir): - for filename in files: - if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: - all_outputs.append(paths[base]+filename) - for filename in dirs: - paths[os.path.join(base,filename)] = paths[base]+filename+'/' - - if self.distribution.has_ext_modules(): - build_cmd = self.get_finalized_command('build_ext') - for ext in build_cmd.extensions: - if isinstance(ext,Library): - continue - fullname = build_cmd.get_ext_fullname(ext.name) - filename = build_cmd.get_ext_filename(fullname) - if not os.path.basename(filename).startswith('dl-'): - if os.path.exists(os.path.join(self.bdist_dir,filename)): - ext_outputs.append(filename) - - return all_outputs, ext_outputs - - -NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) - - - - -def walk_egg(egg_dir): - """Walk an unpacked egg's contents, skipping the metadata directory""" - walker = os.walk(egg_dir) - base,dirs,files = walker.next() - if 'EGG-INFO' in dirs: - dirs.remove('EGG-INFO') - yield base,dirs,files - for bdf in walker: - yield bdf - -def analyze_egg(egg_dir, stubs): - # check for existing flag in EGG-INFO - for flag,fn in safety_flags.items(): - if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)): - return flag - if not can_scan(): return False - safe = True - for base, dirs, files in walk_egg(egg_dir): - for name in files: - if name.endswith('.py') or name.endswith('.pyw'): - continue - elif name.endswith('.pyc') or name.endswith('.pyo'): - # always scan, even if we already know we're not safe - safe = scan_module(egg_dir, base, name, stubs) and safe - return safe - -def write_safety_flag(egg_dir, safe): - # Write or remove zip safety flag file(s) - for flag,fn in safety_flags.items(): - fn = os.path.join(egg_dir, fn) - if os.path.exists(fn): - if safe is None or bool(safe)!=flag: - os.unlink(fn) - elif safe is not None and bool(safe)==flag: - f=open(fn,'wb'); f.write('\n'); f.close() - -safety_flags = { - True: 'zip-safe', - False: 'not-zip-safe', -} - -def scan_module(egg_dir, base, name, stubs): - """Check whether module possibly uses unsafe-for-zipfile stuff""" - - filename = os.path.join(base,name) - if filename[:-1] in stubs: - return 
True # Extension module - pkg = base[len(egg_dir)+1:].replace(os.sep,'.') - module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0] - f = open(filename,'rb'); f.read(8) # skip magic & date - code = marshal.load(f); f.close() - safe = True - symbols = dict.fromkeys(iter_symbols(code)) - for bad in ['__file__', '__path__']: - if bad in symbols: - log.warn("%s: module references %s", module, bad) - safe = False - if 'inspect' in symbols: - for bad in [ - 'getsource', 'getabsfile', 'getsourcefile', 'getfile' - 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', - 'getinnerframes', 'getouterframes', 'stack', 'trace' - ]: - if bad in symbols: - log.warn("%s: module MAY be using inspect.%s", module, bad) - safe = False - if '__name__' in symbols and '__main__' in symbols and '.' not in module: - if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5 - log.warn("%s: top-level module may be 'python -m' script", module) - safe = False - return safe - -def iter_symbols(code): - """Yield names and strings used by `code` and its nested code objects""" - for name in code.co_names: yield name - for const in code.co_consts: - if isinstance(const,basestring): - yield const - elif isinstance(const,CodeType): - for name in iter_symbols(const): - yield name - -def can_scan(): - if not sys.platform.startswith('java') and sys.platform != 'cli': - # CPython, PyPy, etc. - return True - log.warn("Unable to analyze compiled code on this platform.") - log.warn("Please ask the author to include a 'zip_safe'" - " setting (either True or False) in the package's setup.py") - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Attribute names of options for commands that might need to be convinced to -# install to the egg build directory - -INSTALL_DIRECTORY_ATTRS = [ - 'install_lib', 'install_dir', 'install_data', 'install_base' -] - -def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None, - mode='w' -): - """Create a zip file from all the files under 'base_dir'. The output - zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" - Python module (if available) or the InfoZIP "zip" utility (if installed - and found on the default search path). If neither tool is available, - raises DistutilsExecError. Returns the name of the output zip file. 
- """ - import zipfile - mkpath(os.path.dirname(zip_filename), dry_run=dry_run) - log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) - - def visit(z, dirname, names): - for name in names: - path = os.path.normpath(os.path.join(dirname, name)) - if os.path.isfile(path): - p = path[len(base_dir)+1:] - if not dry_run: - z.write(path, p) - log.debug("adding '%s'" % p) - - if compress is None: - compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits - - compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)] - if not dry_run: - z = zipfile.ZipFile(zip_filename, mode, compression=compression) - os.path.walk(base_dir, visit, z) - z.close() - else: - os.path.walk(base_dir, visit, None) - return zip_filename -# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_rpm.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_rpm.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_rpm.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_rpm.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -# This is just a kludge so that bdist_rpm doesn't guess wrong about the -# distribution name and version, if the egg_info command is going to alter -# them, another kludge to allow you to build old-style non-egg RPMs, and -# finally, a kludge to track .rpm files for uploading when run on Python <2.5. - -from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm -import sys, os - -class bdist_rpm(_bdist_rpm): - - def initialize_options(self): - _bdist_rpm.initialize_options(self) - self.no_egg = None - - if sys.version<"2.5": - # Track for uploading any .rpm file(s) moved to self.dist_dir - def move_file(self, src, dst, level=1): - _bdist_rpm.move_file(self, src, dst, level) - if dst==self.dist_dir and src.endswith('.rpm'): - getattr(self.distribution,'dist_files',[]).append( - ('bdist_rpm', - src.endswith('.src.rpm') and 'any' or sys.version[:3], - os.path.join(dst, os.path.basename(src))) - ) - - def run(self): - self.run_command('egg_info') # ensure distro name is up-to-date - _bdist_rpm.run(self) - - - - - - - - - - - - - - def _make_spec_file(self): - version = self.distribution.get_version() - rpmversion = version.replace('-','_') - spec = _bdist_rpm._make_spec_file(self) - line23 = '%define version '+version - line24 = '%define version '+rpmversion - spec = [ - line.replace( - "Source0: %{name}-%{version}.tar", - "Source0: %{name}-%{unmangled_version}.tar" - ).replace( - "setup.py install ", - "setup.py install --single-version-externally-managed " - ).replace( - "%setup", - "%setup -n %{name}-%{unmangled_version}" - ).replace(line23,line24) - for line in spec - ] - spec.insert(spec.index(line24)+1, "%define unmangled_version "+version) - return spec - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_wininst.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_wininst.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/bdist_wininst.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/bdist_wininst.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst -import os, sys - -class bdist_wininst(_bdist_wininst): - _good_upload = _bad_upload = None - - def create_exe(self, arcname, fullname, 
bitmap=None): - _bdist_wininst.create_exe(self, arcname, fullname, bitmap) - installer_name = self.get_installer_filename(fullname) - if self.target_version: - pyversion = self.target_version - # fix 2.5+ bdist_wininst ignoring --target-version spec - self._bad_upload = ('bdist_wininst', 'any', installer_name) - else: - pyversion = 'any' - self._good_upload = ('bdist_wininst', pyversion, installer_name) - - def _fix_upload_names(self): - good, bad = self._good_upload, self._bad_upload - dist_files = getattr(self.distribution, 'dist_files', []) - if bad in dist_files: - dist_files.remove(bad) - if good not in dist_files: - dist_files.append(good) - - def reinitialize_command (self, command, reinit_subcommands=0): - cmd = self.distribution.reinitialize_command( - command, reinit_subcommands) - if command in ('install', 'install_lib'): - cmd.install_lib = None # work around distutils bug - return cmd - - def run(self): - self._is_running = True - try: - _bdist_wininst.run(self) - self._fix_upload_names() - finally: - self._is_running = False - - - if not hasattr(_bdist_wininst, 'get_installer_filename'): - def get_installer_filename(self, fullname): - # Factored out to allow overriding in subclasses - if self.target_version: - # if we create an installer for a specific python version, - # it's better to include this in the name - installer_name = os.path.join(self.dist_dir, - "%s.win32-py%s.exe" % - (fullname, self.target_version)) - else: - installer_name = os.path.join(self.dist_dir, - "%s.win32.exe" % fullname) - return installer_name - # get_installer_filename() - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/build_ext.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/build_ext.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/build_ext.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/build_ext.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,285 +0,0 @@ -from distutils.command.build_ext import build_ext as _du_build_ext -try: - # Attempt to use Pyrex for building extensions, if available - from Pyrex.Distutils.build_ext import build_ext as _build_ext -except ImportError: - _build_ext = _du_build_ext - -import os, sys -from distutils.file_util import copy_file -from setuptools.extension import Library -from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler, get_config_var -get_config_var("LDSHARED") # make sure _config_vars is initialized -from distutils.sysconfig import _config_vars -from distutils import log -from distutils.errors import * - -have_rtld = False -use_stubs = False -libtype = 'shared' - -if sys.platform == "darwin": - use_stubs = True -elif os.name != 'nt': - try: - from dl import RTLD_NOW - have_rtld = True - use_stubs = True - except ImportError: - pass - -def if_dl(s): - if have_rtld: - return s - return '' - - - - - - -class build_ext(_build_ext): - def run(self): - """Build extensions in build directory, then copy if --inplace""" - old_inplace, self.inplace = self.inplace, 0 - _build_ext.run(self) - self.inplace = old_inplace - if old_inplace: - self.copy_extensions_to_source() - - def copy_extensions_to_source(self): - build_py = self.get_finalized_command('build_py') - for ext in self.extensions: - fullname = self.get_ext_fullname(ext.name) - filename = self.get_ext_filename(fullname) - modpath = fullname.split('.') - package = '.'.join(modpath[:-1]) - 
package_dir = build_py.get_package_dir(package) - dest_filename = os.path.join(package_dir,os.path.basename(filename)) - src_filename = os.path.join(self.build_lib,filename) - - # Always copy, even if source is older than destination, to ensure - # that the right extensions for the current Python/platform are - # used. - copy_file( - src_filename, dest_filename, verbose=self.verbose, - dry_run=self.dry_run - ) - if ext._needs_stub: - self.write_stub(package_dir or os.curdir, ext, True) - - - if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'): - # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4 - def swig_sources(self, sources, *otherargs): - # first do any Pyrex processing - sources = _build_ext.swig_sources(self, sources) or sources - # Then do any actual SWIG stuff on the remainder - return _du_build_ext.swig_sources(self, sources, *otherargs) - - - - def get_ext_filename(self, fullname): - filename = _build_ext.get_ext_filename(self,fullname) - if fullname in self.ext_map: - ext = self.ext_map[fullname] - if isinstance(ext,Library): - fn, ext = os.path.splitext(filename) - return self.shlib_compiler.library_filename(fn,libtype) - elif use_stubs and ext._links_to_dynamic: - d,fn = os.path.split(filename) - return os.path.join(d,'dl-'+fn) - return filename - - def initialize_options(self): - _build_ext.initialize_options(self) - self.shlib_compiler = None - self.shlibs = [] - self.ext_map = {} - - def finalize_options(self): - _build_ext.finalize_options(self) - self.extensions = self.extensions or [] - self.check_extensions_list(self.extensions) - self.shlibs = [ext for ext in self.extensions - if isinstance(ext,Library)] - if self.shlibs: - self.setup_shlib_compiler() - for ext in self.extensions: - ext._full_name = self.get_ext_fullname(ext.name) - for ext in self.extensions: - fullname = ext._full_name - self.ext_map[fullname] = ext - ltd = ext._links_to_dynamic = \ - self.shlibs and self.links_to_dynamic(ext) or False - ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library) - filename = ext._file_name = self.get_ext_filename(fullname) - libdir = os.path.dirname(os.path.join(self.build_lib,filename)) - if ltd and libdir not in ext.library_dirs: - ext.library_dirs.append(libdir) - if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: - ext.runtime_library_dirs.append(os.curdir) - - def setup_shlib_compiler(self): - compiler = self.shlib_compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - if sys.platform == "darwin": - tmp = _config_vars.copy() - try: - # XXX Help! I don't have any idea whether these are right... 
- _config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup" - _config_vars['CCSHARED'] = " -dynamiclib" - _config_vars['SO'] = ".dylib" - customize_compiler(compiler) - finally: - _config_vars.clear() - _config_vars.update(tmp) - else: - customize_compiler(compiler) - - if self.include_dirs is not None: - compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name,value) in self.define: - compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - compiler.undefine_macro(macro) - if self.libraries is not None: - compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - compiler.set_link_objects(self.link_objects) - - # hack so distutils' build_extension() builds a library instead - compiler.link_shared_object = link_shared_object.__get__(compiler) - - - - def get_export_symbols(self, ext): - if isinstance(ext,Library): - return ext.export_symbols - return _build_ext.get_export_symbols(self,ext) - - def build_extension(self, ext): - _compiler = self.compiler - try: - if isinstance(ext,Library): - self.compiler = self.shlib_compiler - _build_ext.build_extension(self,ext) - if ext._needs_stub: - self.write_stub( - self.get_finalized_command('build_py').build_lib, ext - ) - finally: - self.compiler = _compiler - - def links_to_dynamic(self, ext): - """Return true if 'ext' links to a dynamic lib in the same package""" - # XXX this should check to ensure the lib is actually being built - # XXX as dynamic, and not just using a locally-found version or a - # XXX static-compiled version - libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) - pkg = '.'.join(ext._full_name.split('.')[:-1]+['']) - for libname in ext.libraries: - if pkg+libname in libnames: return True - return False - - def get_outputs(self): - outputs = _build_ext.get_outputs(self) - optimize = self.get_finalized_command('build_py').optimize - for ext in self.extensions: - if ext._needs_stub: - base = os.path.join(self.build_lib, *ext._full_name.split('.')) - outputs.append(base+'.py') - outputs.append(base+'.pyc') - if optimize: - outputs.append(base+'.pyo') - return outputs - - def write_stub(self, output_dir, ext, compile=False): - log.info("writing stub loader for %s to %s",ext._full_name, output_dir) - stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py' - if compile and os.path.exists(stub_file): - raise DistutilsError(stub_file+" already exists! 
Please delete.") - if not self.dry_run: - f = open(stub_file,'w') - f.write('\n'.join([ - "def __bootstrap__():", - " global __bootstrap__, __file__, __loader__", - " import sys, os, pkg_resources, imp"+if_dl(", dl"), - " __file__ = pkg_resources.resource_filename(__name__,%r)" - % os.path.basename(ext._file_name), - " del __bootstrap__", - " if '__loader__' in globals():", - " del __loader__", - if_dl(" old_flags = sys.getdlopenflags()"), - " old_dir = os.getcwd()", - " try:", - " os.chdir(os.path.dirname(__file__))", - if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), - " imp.load_dynamic(__name__,__file__)", - " finally:", - if_dl(" sys.setdlopenflags(old_flags)"), - " os.chdir(old_dir)", - "__bootstrap__()", - "" # terminal \n - ])) - f.close() - if compile: - from distutils.util import byte_compile - byte_compile([stub_file], optimize=0, - force=True, dry_run=self.dry_run) - optimize = self.get_finalized_command('install_lib').optimize - if optimize > 0: - byte_compile([stub_file], optimize=optimize, - force=True, dry_run=self.dry_run) - if os.path.exists(stub_file) and not self.dry_run: - os.unlink(stub_file) - - -if use_stubs or os.name=='nt': - # Build shared libraries - # - def link_shared_object(self, objects, output_libname, output_dir=None, - libraries=None, library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None - ): self.link( - self.SHARED_LIBRARY, objects, output_libname, - output_dir, libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, extra_preargs, extra_postargs, - build_temp, target_lang - ) -else: - # Build static libraries everywhere else - libtype = 'static' - - def link_shared_object(self, objects, output_libname, output_dir=None, - libraries=None, library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None - ): - # XXX we need to either disallow these attrs on Library instances, - # or warn/abort here if set, or something... - #libraries=None, library_dirs=None, runtime_library_dirs=None, - #export_symbols=None, extra_preargs=None, extra_postargs=None, - #build_temp=None - - assert output_dir is None # distutils build_ext doesn't pass this - output_dir,filename = os.path.split(output_libname) - basename, ext = os.path.splitext(filename) - if self.library_filename("x").startswith('lib'): - # strip 'lib' prefix; this is kludgy if some platform uses - # a different prefix - basename = basename[3:] - - self.create_static_lib( - objects, basename, output_dir, debug, target_lang - ) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/build_py.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/build_py.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/build_py.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/build_py.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -import os.path, sys, fnmatch -from distutils.command.build_py import build_py as _build_py -from distutils.util import convert_path -from glob import glob - -class build_py(_build_py): - """Enhanced 'build_py' command that includes data files with packages - - The data files are specified via a 'package_data' argument to 'setup()'. - See 'setuptools.dist.Distribution' for more details. 
- - Also, this version of the 'build_py' command allows you to specify both - 'py_modules' and 'packages' in the same setup operation. - """ - def finalize_options(self): - _build_py.finalize_options(self) - self.package_data = self.distribution.package_data - self.exclude_package_data = self.distribution.exclude_package_data or {} - if 'data_files' in self.__dict__: del self.__dict__['data_files'] - - def run(self): - self.old_run() - if sys.platform == "win32": - from setuptools.command.scriptsetup import do_scriptsetup - do_scriptsetup() - - def old_run(self): - """Build modules, packages, and copy data files to build directory""" - if not self.py_modules and not self.packages: - return - - if self.py_modules: - self.build_modules() - - if self.packages: - self.build_packages() - self.build_package_data() - - # Only compile actual .py files, using our base class' idea of what our - # output files are. - self.byte_compile(_build_py.get_outputs(self, include_bytecode=0)) - - def __getattr__(self,attr): - if attr=='data_files': # lazily compute data files - self.data_files = files = self._get_data_files(); return files - return _build_py.__getattr__(self,attr) - - def _get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - self.analyze_manifest() - data = [] - for package in self.packages or (): - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = len(src_dir)+1 - - # Strip directory from globbed filenames - filenames = [ - file[plen:] for file in self.find_data_files(package, src_dir) - ] - data.append( (package, src_dir, build_dir, filenames) ) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = (self.package_data.get('', []) - + self.package_data.get(package, [])) - files = self.manifest_files.get(package, [])[:] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - files.extend(glob(os.path.join(src_dir, convert_path(pattern)))) - return self.exclude_data_files(package, src_dir, files) - - def build_package_data(self): - """Copy data files into build directory""" - lastdir = None - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file(os.path.join(src_dir, filename), target) - - - def analyze_manifest(self): - self.manifest_files = mf = {} - if not self.distribution.include_package_data: - return - src_dirs = {} - for package in self.packages or (): - # Locate package source directory - src_dirs[assert_relative(self.get_package_dir(package))] = package - - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - for path in ei_cmd.filelist.files: - d,f = os.path.split(assert_relative(path)) - prev = None - oldf = f - while d and d!=prev and d not in src_dirs: - prev = d - d, df = os.path.split(d) - f = os.path.join(df, f) - if d in src_dirs: - if path.endswith('.py') and f==oldf: - continue # it's a module, not data - mf.setdefault(src_dirs[d],[]).append(path) - - def get_data_files(self): pass # kludge 2.4 for lazy computation - - if sys.version<"2.4": # Python 2.4 already has this code - def get_outputs(self, include_bytecode=1): - """Return complete list of files copied to the 
build directory - - This includes both '.py' files and data files, as well as '.pyc' - and '.pyo' files if 'include_bytecode' is true. (This method is - needed for the 'install_lib' command to do its job properly, and to - generate a correct installation manifest.) - """ - return _build_py.get_outputs(self, include_bytecode) + [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir,filenames in self.data_files - for filename in filenames - ] - - def check_package(self, package, package_dir): - """Check namespace packages' __init__ for declare_namespace""" - try: - return self.packages_checked[package] - except KeyError: - pass - - init_py = _build_py.check_package(self, package, package_dir) - self.packages_checked[package] = init_py - - if not init_py or not self.distribution.namespace_packages: - return init_py - - for pkg in self.distribution.namespace_packages: - if pkg==package or pkg.startswith(package+'.'): - break - else: - return init_py - - f = open(init_py,'rU') - if 'declare_namespace' not in f.read(): - from distutils.errors import DistutilsError - raise DistutilsError( - "Namespace package problem: %s is a namespace package, but its\n" - "__init__.py does not call declare_namespace()! Please fix it.\n" - '(See the setuptools manual under "Namespace Packages" for ' - "details.)\n" % (package,) - ) - f.close() - return init_py - - def initialize_options(self): - self.packages_checked={} - _build_py.initialize_options(self) - - - - - - - - def exclude_data_files(self, package, src_dir, files): - """Filter filenames for package's data files in 'src_dir'""" - globs = (self.exclude_package_data.get('', []) - + self.exclude_package_data.get(package, [])) - bad = [] - for pattern in globs: - bad.extend( - fnmatch.filter( - files, os.path.join(src_dir, convert_path(pattern)) - ) - ) - bad = dict.fromkeys(bad) - seen = {} - return [ - f for f in files if f not in bad - and f not in seen and seen.setdefault(f,1) # ditch dupes - ] - - -def assert_relative(path): - if not os.path.isabs(path): - return path - from distutils.errors import DistutilsSetupError - raise DistutilsSetupError( -"""Error: setup script specifies an absolute path: - - %s - -setup() arguments must *always* be /-separated paths relative to the -setup.py directory, *never* absolute paths. 
-""" % path - ) - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/develop.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/develop.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/develop.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/develop.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -from setuptools.command.easy_install import easy_install -from distutils.util import convert_path -from pkg_resources import Distribution, PathMetadata, normalize_path -from distutils import log -from distutils.errors import * -import sys, os, setuptools, glob - -class develop(easy_install): - """Set up package for development""" - - description = "install package in 'development mode'" - - user_options = easy_install.user_options + [ - ("uninstall", "u", "Uninstall this source package"), - ("egg-path=", None, "Set the path to be used in the .egg-link file"), - ] - - boolean_options = easy_install.boolean_options + ['uninstall'] - - command_consumes_arguments = False # override base - - def run(self): - self.old_run() - if sys.platform == "win32": - from setuptools.command.scriptsetup import do_scriptsetup - do_scriptsetup() - - def old_run(self): - if self.uninstall: - self.multi_version = True - self.uninstall_link() - else: - self.install_for_development() - self.warn_deprecated_options() - - def initialize_options(self): - self.uninstall = None - self.egg_path = None - easy_install.initialize_options(self) - self.setup_path = None - self.always_copy_from = '.' # always copy eggs installed in curdir - - def finalize_options(self): - ei = self.get_finalized_command("egg_info") - if ei.broken_egg_info: - raise DistutilsError( - "Please rename %r to %r before using 'develop'" - % (ei.egg_info, ei.broken_egg_info) - ) - self.args = [ei.egg_name] - easy_install.finalize_options(self) - # pick up setup-dir .egg files only: no .egg-info - self.package_index.scan(glob.glob('*.egg')) - - self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link') - self.egg_base = ei.egg_base - if self.egg_path is None: - self.egg_path = os.path.abspath(ei.egg_base) - - target = normalize_path(self.egg_base) - if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target: - raise DistutilsOptionError( - "--egg-path must be a relative path from the install" - " directory to "+target - ) - - # Make a distribution for the package's source - self.dist = Distribution( - target, - PathMetadata(target, os.path.abspath(ei.egg_info)), - project_name = ei.egg_name - ) - - p = self.egg_base.replace(os.sep,'/') - if p!= os.curdir: - p = '../' * (p.count('/')+1) - self.setup_path = p - p = normalize_path(os.path.join(self.install_dir, self.egg_path, p)) - if p != normalize_path(os.curdir): - raise DistutilsOptionError( - "Can't get a consistent path to setup script from" - " installation directory", p, normalize_path(os.curdir)) - - def install_for_development(self): - # Ensure metadata is up-to-date - self.run_command('egg_info') - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - self.install_site_py() # ensure that target dir is site-safe - if setuptools.bootstrap_install_from: - self.easy_install(setuptools.bootstrap_install_from) - setuptools.bootstrap_install_from = None - - # create an .egg-link in the installation dir, pointing to our egg - log.info("Creating %s (link to %s)", self.egg_link, 
self.egg_base) - if not self.dry_run: - f = open(self.egg_link,"w") - f.write(self.egg_path + "\n" + self.setup_path) - f.close() - # postprocess the installed distro, fixing up .pth, installing scripts, - # and handling requirements - self.process_distribution(None, self.dist, not self.no_deps) - - - def uninstall_link(self): - if os.path.exists(self.egg_link): - log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) - contents = [line.rstrip() for line in file(self.egg_link)] - if contents not in ([self.egg_path], [self.egg_path, self.setup_path]): - log.warn("Link points to %s: uninstall aborted", contents) - return - if not self.dry_run: - os.unlink(self.egg_link) - if not self.dry_run: - self.update_pth(self.dist) # remove any .pth link to us - if self.distribution.scripts: - # XXX should also check for entry point scripts! - log.warn("Note: you must uninstall or replace scripts manually!") - - - - - - def install_egg_scripts(self, dist): - if dist is not self.dist: - # Installing a dependency, so fall back to normal behavior - return easy_install.install_egg_scripts(self,dist) - - # create wrapper scripts in the script dir, pointing to dist.scripts - - # new-style... - self.install_wrapper_scripts(dist) - - # ...and old-style - for script_name in self.distribution.scripts or []: - script_path = os.path.abspath(convert_path(script_name)) - script_name = os.path.basename(script_path) - f = open(script_path,'rU') - script_text = f.read() - f.close() - self.install_script(dist, script_name, script_text, script_path) - - - - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/easy_install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/easy_install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/easy_install.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/easy_install.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1739 +0,0 @@ -#!python -"""\ -Easy Install ------------- - -A tool for doing automatic download/extract/build of distutils-based Python -packages. For detailed documentation, see the accompanying EasyInstall.txt -file, or visit the `EasyInstall home page`__. 
- -__ http://peak.telecommunity.com/DevCenter/EasyInstall -""" -import sys, os.path, zipimport, shutil, tempfile, zipfile, re, stat, random -from glob import glob -from setuptools import Command, _dont_write_bytecode -from setuptools import __version__ as setuptools_version -from setuptools.sandbox import run_setup -from distutils import log, dir_util -from distutils.sysconfig import get_python_lib -from distutils.errors import DistutilsArgError, DistutilsOptionError, \ - DistutilsError -from setuptools.archive_util import unpack_archive -from setuptools.package_index import PackageIndex, parse_bdist_wininst -from setuptools.package_index import URL_SCHEME -from setuptools.command import bdist_egg, egg_info -from pkg_resources import * -sys_executable = os.path.normpath(sys.executable) - -__all__ = [ - 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', - 'main', 'get_exe_prefixes', -] - -def samefile(p1,p2): - if hasattr(os.path,'samefile') and ( - os.path.exists(p1) and os.path.exists(p2) - ): - return os.path.samefile(p1,p2) - return ( - os.path.normpath(os.path.normcase(p1)) == - os.path.normpath(os.path.normcase(p2)) - ) - -class easy_install(Command): - """Manage a download/build/install process""" - description = "Find/get/install Python packages" - command_consumes_arguments = True - - user_options = [ - ('prefix=', None, "installation prefix"), - ("zip-ok", "z", "install package as a zipfile"), - ("multi-version", "m", "make apps have to require() a version"), - ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), - ("install-dir=", "d", "install package to DIR"), - ("script-dir=", "s", "install scripts to DIR"), - ("exclude-scripts", "x", "Don't install scripts"), - ("always-copy", "a", "Copy all needed packages to install dir"), - ("index-url=", "i", "base URL of Python Package Index"), - ("find-links=", "f", "additional URL(s) to search for packages"), - ("delete-conflicting", "D", "no longer needed; don't use this"), - ("ignore-conflicts-at-my-risk", None, - "no longer needed; don't use this"), - ("build-directory=", "b", - "download/extract/build in DIR; keep the results"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('record=', None, - "filename in which to record list of installed files"), - ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), - ('site-dirs=','S',"list of directories where .pth files work"), - ('editable', 'e', "Install specified packages in editable form"), - ('no-deps', 'N', "don't install dependencies"), - ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), - ('local-snapshots-ok', 'l', "allow building eggs from local checkouts"), - ] - boolean_options = [ - 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', - 'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable', - 'no-deps', 'local-snapshots-ok', - ] - negative_opt = {'always-unzip': 'zip-ok'} - create_index = PackageIndex - - def initialize_options(self): - self.zip_ok = self.local_snapshots_ok = None - self.install_dir = self.script_dir = self.exclude_scripts = None - self.index_url = None - self.find_links = None - self.build_directory = None - self.args = None - self.optimize = self.record = None - self.upgrade = self.always_copy = self.multi_version = None - self.editable = self.no_deps = self.allow_hosts = None - self.root = self.prefix = self.no_report = None - - # Options not specifiable via command 
line - self.package_index = None - self.pth_file = self.always_copy_from = None - self.delete_conflicting = None - self.ignore_conflicts_at_my_risk = None - self.site_dirs = None - self.installed_projects = {} - self.sitepy_installed = False - # Always read easy_install options, even if we are subclassed, or have - # an independent instance created. This ensures that defaults will - # always come from the standard configuration file(s)' "easy_install" - # section, even if this is a "develop" or "install" command, or some - # other embedding. - self._dry_run = None - self.verbose = self.distribution.verbose - self.distribution._set_command_options( - self, self.distribution.get_option_dict('easy_install') - ) - - def delete_blockers(self, blockers): - for filename in blockers: - if os.path.exists(filename) or os.path.islink(filename): - log.info("Deleting %s", filename) - if not self.dry_run: - if os.path.isdir(filename) and not os.path.islink(filename): - rmtree(filename) - else: - os.unlink(filename) - - def finalize_options(self): - self._expand('install_dir','script_dir','build_directory','site_dirs') - # If a non-default installation directory was specified, default the - # script directory to match it. - if self.script_dir is None: - self.script_dir = self.install_dir - - # Let install_dir get set by install_lib command, which in turn - # gets its info from the install command, and takes into account - # --prefix and --home and all that other crud. - self.set_undefined_options('install_lib', - ('install_dir','install_dir') - ) - # Likewise, set default script_dir from 'install_scripts.install_dir' - self.set_undefined_options('install_scripts', - ('install_dir', 'script_dir') - ) - # default --record from the install command - self.set_undefined_options('install', ('record', 'record')) - normpath = map(normalize_path, sys.path) - self.all_site_dirs = get_site_dirs() - if self.site_dirs is not None: - site_dirs = [ - os.path.expanduser(s.strip()) for s in self.site_dirs.split(',') - ] - for d in site_dirs: - if not os.path.isdir(d): - log.warn("%s (in --site-dirs) does not exist", d) - elif normalize_path(d) not in normpath: - raise DistutilsOptionError( - d+" (in --site-dirs) is not on sys.path" - ) - else: - self.all_site_dirs.append(normalize_path(d)) - if not self.editable: self.check_site_dir() - self.index_url = self.index_url or "http://pypi.python.org/simple" - self.shadow_path = self.all_site_dirs[:] - for path_item in self.install_dir, normalize_path(self.script_dir): - if path_item not in self.shadow_path: - self.shadow_path.insert(0, path_item) - - if self.allow_hosts is not None: - hosts = [s.strip() for s in self.allow_hosts.split(',')] - else: - hosts = ['*'] - if self.package_index is None: - self.package_index = self.create_index( - self.index_url, search_path = self.shadow_path+sys.path, hosts=hosts, - ) - self.local_index = Environment(self.shadow_path+sys.path) - - if self.find_links is not None: - if isinstance(self.find_links, basestring): - self.find_links = self.find_links.split() - else: - self.find_links = [] - if self.local_snapshots_ok: - self.package_index.scan_egg_links(self.shadow_path+sys.path) - self.package_index.add_find_links(self.find_links) - self.set_undefined_options('install_lib', ('optimize','optimize')) - if not isinstance(self.optimize,int): - try: - self.optimize = int(self.optimize) - if not (0 <= self.optimize <= 2): raise ValueError - except ValueError: - raise DistutilsOptionError("--optimize must be 0, 1, or 2") - - if 
self.delete_conflicting and self.ignore_conflicts_at_my_risk: - raise DistutilsOptionError( - "Can't use both --delete-conflicting and " - "--ignore-conflicts-at-my-risk at the same time" - ) - if self.editable and not self.build_directory: - raise DistutilsArgError( - "Must specify a build directory (-b) when using --editable" - ) - if not self.args: - raise DistutilsArgError( - "No urls, filenames, or requirements specified (see --help)") - - self.outputs = [] - - def run(self): - if self.verbose!=self.distribution.verbose: - log.set_verbosity(self.verbose) - try: - for spec in self.args: - self.easy_install(spec, not self.no_deps) - if self.record: - outputs = self.outputs - if self.root: # strip any package prefix - root_len = len(self.root) - for counter in xrange(len(outputs)): - outputs[counter] = outputs[counter][root_len:] - from distutils import file_util - self.execute( - file_util.write_file, (self.record, outputs), - "writing list of installed files to '%s'" % - self.record - ) - self.warn_deprecated_options() - finally: - log.set_verbosity(self.distribution.verbose) - - def pseudo_tempname(self): - """Return a pseudo-tempname base in the install directory. - This code is intentionally naive; if a malicious party can write to - the target directory you're already in deep doodoo. - """ - try: - pid = os.getpid() - except: - pid = random.randint(0,sys.maxint) - return os.path.join(self.install_dir, "test-easy-install-%s" % pid) - - def warn_deprecated_options(self): - if self.delete_conflicting or self.ignore_conflicts_at_my_risk: - log.warn( - "Note: The -D, --delete-conflicting and" - " --ignore-conflicts-at-my-risk no longer have any purpose" - " and should not be used." - ) - - def check_site_dir(self): - """Verify that self.install_dir is .pth-capable dir, if needed""" - instdir = normalize_path(self.install_dir) - pth_file = os.path.join(instdir,'easy-install.pth') - - # mkdir it if necessary - try: - os.makedirs(instdir) - except OSError: - # Oh well -- hopefully this error simply means that it is already there. - # If not the subsequent write test will identify the problem. - pass - # add it to site dirs - self.all_site_dirs.append(instdir) - - # Is it a configured, PYTHONPATH, implicit, or explicit site dir? - is_site_dir = instdir in self.all_site_dirs - - if not is_site_dir and not self.multi_version: - # No? 
Then directly test whether it does .pth file processing - is_site_dir = self.check_pth_processing() - else: - # make sure we can write to target dir - testfile = self.pseudo_tempname()+'.write-test' - test_exists = os.path.exists(testfile) - try: - if test_exists: os.unlink(testfile) - open(testfile,'w').close() - os.unlink(testfile) - except (OSError,IOError): - self.cant_write_to_target() - - if not is_site_dir and not self.multi_version: - # Can't install non-multi to non-site dir - log.warn(self.no_default_version_msg()) - - if is_site_dir: - if self.pth_file is None: - self.pth_file = PthDistributions(pth_file, self.all_site_dirs) - else: - self.pth_file = None - - if self.multi_version and not os.path.exists(pth_file): - self.sitepy_installed = True # don't need site.py in this case - self.pth_file = None # and don't create a .pth file - self.install_dir = instdir - - def cant_write_to_target(self): - msg = """can't create or remove files in install directory - -The following error occurred while trying to add or remove files in the -installation directory: - - %s - -The installation directory you specified (via --install-dir, --prefix, or -the distutils default setting) was: - - %s -""" % (sys.exc_info()[1], self.install_dir,) - - if not os.path.exists(self.install_dir): - msg += """ -This directory does not currently exist. Please create it and try again, or -choose a different installation directory (using the -d or --install-dir -option). -""" - else: - msg += """ -Perhaps your account does not have write access to this directory? If the -installation directory is a system-owned directory, you may need to sign in -as the administrator or "root" account. If you do not have administrative -access to this machine, you may wish to choose a different installation -directory, preferably one that is listed in your PYTHONPATH environment -variable. - -For information on other options, you may wish to consult the -documentation at: - - http://peak.telecommunity.com/EasyInstall.html - -Please make the appropriate changes for your system and try again. -""" - raise DistutilsError(msg) - - - - - def check_pth_processing(self): - """Empirically verify whether .pth files are supported in inst. 
dir""" - instdir = self.install_dir - log.info("Checking .pth file support in %s", instdir) - pth_file = self.pseudo_tempname()+".pth" - ok_file = pth_file+'.ok' - ok_exists = os.path.exists(ok_file) - try: - if ok_exists: os.unlink(ok_file) - f = open(pth_file,'w') - except (OSError,IOError): - self.cant_write_to_target() - else: - try: - f.write("import os;open(%r,'w').write('OK')\n" % (ok_file,)) - f.close(); f=None - executable = sys.executable - if os.name=='nt': - dirname,basename = os.path.split(executable) - alt = os.path.join(dirname,'pythonw.exe') - if basename.lower()=='python.exe' and os.path.exists(alt): - # use pythonw.exe to avoid opening a console window - executable = alt - - from distutils.spawn import spawn - spawn([executable,'-E','-c','pass'],0) - - if os.path.exists(ok_file): - log.info( - "TEST PASSED: %s appears to support .pth files", - instdir - ) - return True - finally: - if f: f.close() - if os.path.exists(ok_file): os.unlink(ok_file) - if os.path.exists(pth_file): os.unlink(pth_file) - if not self.multi_version: - log.warn("TEST FAILED: %s does NOT support .pth files", instdir) - return False - - def install_egg_scripts(self, dist): - """Write all the scripts for `dist`, unless scripts are excluded""" - if not self.exclude_scripts and dist.metadata_isdir('scripts'): - for script_name in dist.metadata_listdir('scripts'): - self.install_script( - dist, script_name, - dist.get_metadata('scripts/'+script_name) - ) - self.install_wrapper_scripts(dist) - - def add_output(self, path): - if os.path.isdir(path): - for base, dirs, files in os.walk(path): - for filename in files: - self.outputs.append(os.path.join(base,filename)) - else: - self.outputs.append(path) - - def not_editable(self, spec): - if self.editable: - raise DistutilsArgError( - "Invalid argument %r: you can't use filenames or URLs " - "with --editable (except via the --find-links option)." 
- % (spec,) - ) - - def check_editable(self,spec): - if not self.editable: - return - - if os.path.exists(os.path.join(self.build_directory, spec.key)): - raise DistutilsArgError( - "%r already exists in %s; can't do a checkout there" % - (spec.key, self.build_directory) - ) - - - - - - - def easy_install(self, spec, deps=False): - tmpdir = tempfile.mkdtemp(prefix="easy_install-") - download = None - if not self.editable: self.install_site_py() - - try: - if not isinstance(spec,Requirement): - if URL_SCHEME(spec): - # It's a url, download it to tmpdir and process - self.not_editable(spec) - download = self.package_index.download(spec, tmpdir) - return self.install_item(None, download, tmpdir, deps, True) - - elif os.path.exists(spec): - # Existing file or directory, just process it directly - self.not_editable(spec) - return self.install_item(None, spec, tmpdir, deps, True) - else: - spec = parse_requirement_arg(spec) - - self.check_editable(spec) - dist = self.package_index.fetch_distribution( - spec, tmpdir, self.upgrade, self.editable, not self.always_copy, - self.local_index - ) - if dist is None: - msg = "Could not find suitable distribution for %r" % spec - if self.always_copy: - msg+=" (--always-copy skips system and development eggs)" - raise DistutilsError(msg) - elif dist.precedence==DEVELOP_DIST: - # .egg-info dists don't need installing, just process deps - self.process_distribution(spec, dist, deps, "Using") - return dist - else: - return self.install_item(spec, dist.location, tmpdir, deps) - - finally: - if os.path.exists(tmpdir): - rmtree(tmpdir) - - def install_item(self, spec, download, tmpdir, deps, install_needed=False): - - # Installation is also needed if file in tmpdir or is not an egg - install_needed = install_needed or self.always_copy - install_needed = install_needed or os.path.dirname(download) == tmpdir - install_needed = install_needed or not download.endswith('.egg') - install_needed = install_needed or ( - self.always_copy_from is not None and - os.path.dirname(normalize_path(download)) == - normalize_path(self.always_copy_from) - ) - - if spec and not install_needed: - # at this point, we know it's a local .egg, we just don't know if - # it's already installed. 
- for dist in self.local_index[spec.project_name]: - if dist.location==download: - break - else: - install_needed = True # it's not in the local index - - log.info("Processing %s", os.path.basename(download)) - - if install_needed: - dists = self.install_eggs(spec, download, tmpdir) - for dist in dists: - self.process_distribution(spec, dist, deps) - else: - dists = [self.check_conflicts(self.egg_distribution(download))] - self.process_distribution(spec, dists[0], deps, "Using") - - if spec is not None: - for dist in dists: - if dist in spec: - return dist - - - - - - - def process_distribution(self, requirement, dist, deps=True, *info): - self.update_pth(dist) - self.package_index.add(dist) - self.local_index.add(dist) - self.install_egg_scripts(dist) - self.installed_projects[dist.key] = dist - log.info(self.installation_report(requirement, dist, *info)) - if dist.has_metadata('dependency_links.txt'): - self.package_index.add_find_links( - dist.get_metadata_lines('dependency_links.txt') - ) - if not deps and not self.always_copy: - return - elif requirement is not None and dist.key != requirement.key: - log.warn("Skipping dependencies for %s", dist) - return # XXX this is not the distribution we were looking for - elif requirement is None or dist not in requirement: - # if we wound up with a different version, resolve what we've got - distreq = dist.as_requirement() - requirement = requirement or distreq - requirement = Requirement( - distreq.project_name, distreq.specs, requirement.extras - ) - log.info("Processing dependencies for %s", requirement) - try: - distros = WorkingSet([]).resolve( - [requirement], self.local_index, self.easy_install - ) - except DistributionNotFound, e: - raise DistutilsError( - "Could not find required distribution %s" % e.args - ) - except VersionConflict, e: - raise DistutilsError( - "Installed distribution %s conflicts with requirement %s" - % e.args - ) - if self.always_copy or self.always_copy_from: - # Force all the relevant distros to be copied or activated - for dist in distros: - if dist.key not in self.installed_projects: - self.easy_install(dist.as_requirement()) - log.info("Finished processing dependencies for %s", requirement) - - def should_unzip(self, dist): - if self.zip_ok is not None: - return not self.zip_ok - if dist.has_metadata('not-zip-safe'): - return True - if not dist.has_metadata('zip-safe'): - return True - return False - - def maybe_move(self, spec, dist_filename, setup_base): - dst = os.path.join(self.build_directory, spec.key) - if os.path.exists(dst): - log.warn( - "%r already exists in %s; build directory %s will not be kept", - spec.key, self.build_directory, setup_base - ) - return setup_base - if os.path.isdir(dist_filename): - setup_base = dist_filename - else: - if os.path.dirname(dist_filename)==setup_base: - os.unlink(dist_filename) # get it out of the tmp dir - contents = os.listdir(setup_base) - if len(contents)==1: - dist_filename = os.path.join(setup_base,contents[0]) - if os.path.isdir(dist_filename): - # if the only thing there is a directory, move it instead - setup_base = dist_filename - ensure_directory(dst); shutil.move(setup_base, dst) - return dst - - def install_wrapper_scripts(self, dist): - if not self.exclude_scripts: - for args in get_script_args(dist, script_dir=self.script_dir): - self.write_script(*args) - - - - def install_script(self, dist, script_name, script_text, dev_path=None): - """Generate a legacy script wrapper and install it""" - spec = str(dist.as_requirement()) - is_script = 
is_python_script(script_text, script_name) - - requires = [spec] + [str(r) for r in dist.requires()] - if is_script and dev_path: - script_text = get_script_header(script_text) + ( - "# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r\n" - "__requires__ = %(requires)r\n" - "from pkg_resources import require; require(%(spec)r)\n" - "del require\n" - "__file__ = %(dev_path)r\n" - "execfile(__file__)\n" - ) % locals() - elif is_script: - script_text = get_script_header(script_text) + ( - "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n" - "__requires__ = %(requires)r\n" - "import pkg_resources\n" - "pkg_resources.run_script(%(spec)r, %(script_name)r)\n" - ) % locals() - self.write_script(script_name, script_text, 'b') - - def write_script(self, script_name, contents, mode="t", blockers=()): - """Write an executable file to the scripts directory""" - self.delete_blockers( # clean up old .py/.pyw w/o a script - [os.path.join(self.script_dir,x) for x in blockers]) - log.info("Installing %s script to %s", script_name, self.script_dir) - target = os.path.join(self.script_dir, script_name) - self.add_output(target) - - if not self.dry_run: - ensure_directory(target) - f = open(target,"w"+mode) - f.write(contents) - f.close() - chmod(target,0755) - - - - - def install_eggs(self, spec, dist_filename, tmpdir): - # .egg dirs or files are already built, so just return them - if dist_filename.lower().endswith('.egg'): - return [self.install_egg(dist_filename, tmpdir)] - elif dist_filename.lower().endswith('.exe'): - return [self.install_exe(dist_filename, tmpdir)] - - # Anything else, try to extract and build - setup_base = tmpdir - if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): - unpack_archive(dist_filename, tmpdir, self.unpack_progress) - elif os.path.isdir(dist_filename): - setup_base = os.path.abspath(dist_filename) - - if (setup_base.startswith(tmpdir) # something we downloaded - and self.build_directory and spec is not None - ): - setup_base = self.maybe_move(spec, dist_filename, setup_base) - - # Find the setup.py file - setup_script = os.path.join(setup_base, 'setup.py') - - if not os.path.exists(setup_script): - setups = glob(os.path.join(setup_base, '*', 'setup.py')) - if not setups: - raise DistutilsError( - "Couldn't find a setup script in %s" % os.path.abspath(dist_filename) - ) - if len(setups)>1: - raise DistutilsError( - "Multiple setup scripts in %s" % os.path.abspath(dist_filename) - ) - setup_script = setups[0] - - # Now run it, and return the result - if self.editable: - log.info(self.report_editable(spec, setup_script)) - return [] - else: - return self.build_and_install(setup_script, setup_base) - - def egg_distribution(self, egg_path): - if os.path.isdir(egg_path): - metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO')) - else: - metadata = EggMetadata(zipimport.zipimporter(egg_path)) - return Distribution.from_filename(egg_path,metadata=metadata) - - def install_egg(self, egg_path, tmpdir): - destination = os.path.join(self.install_dir,os.path.basename(egg_path)) - destination = os.path.abspath(destination) - if not self.dry_run: - ensure_directory(destination) - - dist = self.egg_distribution(egg_path) - self.check_conflicts(dist) - if not samefile(egg_path, destination): - if os.path.isdir(destination) and not os.path.islink(destination): - dir_util.remove_tree(destination, dry_run=self.dry_run) - elif os.path.exists(destination): - self.execute(os.unlink,(destination,),"Removing "+destination) - uncache_zipdir(destination) - if 
os.path.isdir(egg_path): - if egg_path.startswith(tmpdir): - f,m = shutil.move, "Moving" - else: - f,m = shutil.copytree, "Copying" - elif self.should_unzip(dist): - self.mkpath(destination) - f,m = self.unpack_and_compile, "Extracting" - elif egg_path.startswith(tmpdir): - f,m = shutil.move, "Moving" - else: - f,m = shutil.copy2, "Copying" - - self.execute(f, (egg_path, destination), - (m+" %s to %s") % - (os.path.basename(egg_path),os.path.dirname(destination))) - - self.add_output(destination) - return self.egg_distribution(destination) - - def install_exe(self, dist_filename, tmpdir): - # See if it's valid, get data - cfg = extract_wininst_cfg(dist_filename) - if cfg is None: - raise DistutilsError( - "%s is not a valid distutils Windows .exe" % dist_filename - ) - # Create a dummy distribution object until we build the real distro - dist = Distribution(None, - project_name=cfg.get('metadata','name'), - version=cfg.get('metadata','version'), platform="win32" - ) - - # Convert the .exe to an unpacked egg - egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg') - egg_tmp = egg_path+'.tmp' - egg_info = os.path.join(egg_tmp, 'EGG-INFO') - pkg_inf = os.path.join(egg_info, 'PKG-INFO') - ensure_directory(pkg_inf) # make sure EGG-INFO dir exists - dist._provider = PathMetadata(egg_tmp, egg_info) # XXX - self.exe_to_egg(dist_filename, egg_tmp) - - # Write EGG-INFO/PKG-INFO - if not os.path.exists(pkg_inf): - f = open(pkg_inf,'w') - f.write('Metadata-Version: 1.0\n') - for k,v in cfg.items('metadata'): - if k!='target_version': - f.write('%s: %s\n' % (k.replace('_','-').title(), v)) - f.close() - script_dir = os.path.join(egg_info,'scripts') - self.delete_blockers( # delete entry-point scripts to avoid duping - [os.path.join(script_dir,args[0]) for args in get_script_args(dist)] - ) - # Build .egg file from tmpdir - bdist_egg.make_zipfile( - egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run - ) - # install the .egg - return self.install_egg(egg_path, tmpdir) - - def exe_to_egg(self, dist_filename, egg_tmp): - """Extract a bdist_wininst to the directories an egg would use""" - # Check for .pth file and set up prefix translations - prefixes = get_exe_prefixes(dist_filename) - to_compile = [] - native_libs = [] - top_level = {} - def process(src,dst): - s = src.lower() - for old,new in prefixes: - if s.startswith(old): - src = new+src[len(old):] - parts = src.split('/') - dst = os.path.join(egg_tmp, *parts) - dl = dst.lower() - if dl.endswith('.pyd') or dl.endswith('.dll'): - parts[-1] = bdist_egg.strip_module(parts[-1]) - top_level[os.path.splitext(parts[0])[0]] = 1 - native_libs.append(src) - elif dl.endswith('.py') and old!='SCRIPTS/': - top_level[os.path.splitext(parts[0])[0]] = 1 - to_compile.append(dst) - return dst - if not src.endswith('.pth'): - log.warn("WARNING: can't process %s", src) - return None - # extract, tracking .pyd/.dll->native_libs and .py -> to_compile - unpack_archive(dist_filename, egg_tmp, process) - stubs = [] - for res in native_libs: - if res.lower().endswith('.pyd'): # create stubs for .pyd's - parts = res.split('/') - resource = parts[-1] - parts[-1] = bdist_egg.strip_module(parts[-1])+'.py' - pyfile = os.path.join(egg_tmp, *parts) - to_compile.append(pyfile); stubs.append(pyfile) - bdist_egg.write_stub(resource, pyfile) - self.byte_compile(to_compile) # compile .py's - bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'), - bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag - - for name in 'top_level','native_libs': - 
if locals()[name]: - txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt') - if not os.path.exists(txt): - open(txt,'w').write('\n'.join(locals()[name])+'\n') - - def check_conflicts(self, dist): - """Verify that there are no conflicting "old-style" packages""" - - return dist # XXX temporarily disable until new strategy is stable - from imp import find_module, get_suffixes - from glob import glob - - blockers = [] - names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr - - exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out - for ext,mode,typ in get_suffixes(): - exts[ext] = 1 - - for path,files in expand_paths([self.install_dir]+self.all_site_dirs): - for filename in files: - base,ext = os.path.splitext(filename) - if base in names: - if not ext: - # no extension, check for package - try: - f, filename, descr = find_module(base, [path]) - except ImportError: - continue - else: - if f: f.close() - if filename not in blockers: - blockers.append(filename) - elif ext in exts and base!='site': # XXX ugh - blockers.append(os.path.join(path,filename)) - if blockers: - self.found_conflicts(dist, blockers) - - return dist - - def found_conflicts(self, dist, blockers): - if self.delete_conflicting: - log.warn("Attempting to delete conflicting packages:") - return self.delete_blockers(blockers) - - msg = """\ -------------------------------------------------------------------------- -CONFLICT WARNING: - -The following modules or packages have the same names as modules or -packages being installed, and will be *before* the installed packages in -Python's search path. You MUST remove all of the relevant files and -directories before you will be able to use the package(s) you are -installing: - - %s - -""" % '\n '.join(blockers) - - if self.ignore_conflicts_at_my_risk: - msg += """\ -(Note: you can run EasyInstall on '%s' with the ---delete-conflicting option to attempt deletion of the above files -and/or directories.) -""" % dist.project_name - else: - msg += """\ -Note: you can attempt this installation again with EasyInstall, and use -either the --delete-conflicting (-D) option or the ---ignore-conflicts-at-my-risk option, to either delete the above files -and directories, or to ignore the conflicts, respectively. Note that if -you ignore the conflicts, the installed package(s) may not work. -""" - msg += """\ -------------------------------------------------------------------------- -""" - sys.stderr.write(msg) - sys.stderr.flush() - if not self.ignore_conflicts_at_my_risk: - raise DistutilsError("Installation aborted due to conflicts") - - def installation_report(self, req, dist, what="Installed"): - """Helpful installation message for display to package users""" - msg = "\n%(what)s %(eggloc)s%(extras)s" - if self.multi_version and not self.no_report: - msg += """ - -Because this distribution was installed --multi-version, before you can -import modules from this package in an application, you will need to -'import pkg_resources' and then use a 'require()' call similar to one of -these examples, in order to select the desired version: - - pkg_resources.require("%(name)s") # latest installed version - pkg_resources.require("%(name)s==%(version)s") # this exact version - pkg_resources.require("%(name)s>=%(version)s") # this version or higher -""" - if self.install_dir not in map(normalize_path,sys.path): - msg += """ - -Note also that the installation directory must be on sys.path at runtime for -this to work. (e.g. 
by being the application's script directory, by being on -PYTHONPATH, or by being added to sys.path by your code.) -""" - eggloc = dist.location - name = dist.project_name - version = dist.version - extras = '' # TODO: self.report_extras(req, dist) - return msg % locals() - - def report_editable(self, spec, setup_script): - dirname = os.path.dirname(setup_script) - python = sys.executable - return """\nExtracted editable version of %(spec)s to %(dirname)s - -If it uses setuptools in its setup script, you can activate it in -"development" mode by going to that directory and running:: - - %(python)s setup.py develop - -See the setuptools documentation for the "develop" command for more info. -""" % locals() - - def run_setup(self, setup_script, setup_base, args): - sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) - sys.modules.setdefault('distutils.command.egg_info', egg_info) - - args = list(args) - if self.verbose>2: - v = 'v' * (self.verbose - 1) - args.insert(0,'-'+v) - elif self.verbose<2: - args.insert(0,'-q') - if self.dry_run: - args.insert(0,'-n') - log.info( - "Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args) - ) - try: - run_setup(setup_script, args) - except SystemExit, v: - raise DistutilsError("Setup script exited with %s" % (v.args[0],)) - - def build_and_install(self, setup_script, setup_base): - args = ['bdist_egg', '--dist-dir'] - dist_dir = tempfile.mkdtemp( - prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) - ) - try: - args.append(dist_dir) - self.run_setup(setup_script, setup_base, args) - all_eggs = Environment([dist_dir]) - eggs = [] - for key in all_eggs: - for dist in all_eggs[key]: - eggs.append(self.install_egg(dist.location, setup_base)) - if not eggs and not self.dry_run: - log.warn("No eggs found in %s (setup script problem?)", - dist_dir) - return eggs - finally: - rmtree(dist_dir) - log.set_verbosity(self.verbose) # restore our log verbosity - - def update_pth(self,dist): - if self.pth_file is None: - return - - for d in self.pth_file[dist.key]: # drop old entries - if self.multi_version or d.location != dist.location: - log.info("Removing %s from easy-install.pth file", d) - self.pth_file.remove(d) - if d.location in self.shadow_path: - self.shadow_path.remove(d.location) - - if not self.multi_version: - if dist.location in self.pth_file.paths: - log.info( - "%s is already the active version in easy-install.pth", - dist - ) - else: - log.info("Adding %s to easy-install.pth file", dist) - self.pth_file.add(dist) # add new entry - if dist.location not in self.shadow_path: - self.shadow_path.append(dist.location) - - if not self.dry_run: - - self.pth_file.save() - - if dist.key=='setuptools': - # Ensure that setuptools itself never becomes unavailable! - # XXX should this check for latest version? 
- filename = os.path.join(self.install_dir,'setuptools.pth') - if os.path.islink(filename): os.unlink(filename) - f = open(filename, 'wt') - f.write(self.pth_file.make_relative(dist.location)+'\n') - f.close() - - def unpack_progress(self, src, dst): - # Progress filter for unpacking - log.debug("Unpacking %s to %s", src, dst) - return dst # only unpack-and-compile skips files for dry run - - def unpack_and_compile(self, egg_path, destination): - to_compile = []; to_chmod = [] - - def pf(src,dst): - if dst.endswith('.py') and not src.startswith('EGG-INFO/'): - to_compile.append(dst) - elif dst.endswith('.dll') or dst.endswith('.so'): - to_chmod.append(dst) - self.unpack_progress(src,dst) - return not self.dry_run and dst or None - - unpack_archive(egg_path, destination, pf) - self.byte_compile(to_compile) - if not self.dry_run: - for f in to_chmod: - mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755 - chmod(f, mode) - - def byte_compile(self, to_compile): - if _dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - from distutils.util import byte_compile - try: - # try to make the byte compile messages quieter - log.set_verbosity(self.verbose - 1) - - byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) - if self.optimize: - byte_compile( - to_compile, optimize=self.optimize, force=1, - dry_run=self.dry_run - ) - finally: - log.set_verbosity(self.verbose) # restore original verbosity - - - - - - - - - - def no_default_version_msg(self): - return """bad install directory or PYTHONPATH - -You are attempting to install a package to a directory that is not -on PYTHONPATH and which Python does not read ".pth" files from. The -installation directory you specified (via --install-dir, --prefix, or -the distutils default setting) was: - - %s - -and your PYTHONPATH environment variable currently contains: - - %r - -Here are some of your options for correcting the problem: - -* You can choose a different installation directory, i.e., one that is - on PYTHONPATH or supports .pth files - -* You can add the installation directory to the PYTHONPATH environment - variable. (It must then also be on PYTHONPATH whenever you run - Python and want to use the package(s) you are installing.) - -* You can set up the installation directory to support ".pth" files by - using one of the approaches described here: - - http://peak.telecommunity.com/EasyInstall.html#custom-installation-locations - -Proceeding to install. Please remember that unless you make one of -these changes you will not be able to run the installed code. -""" % ( - self.install_dir, os.environ.get('PYTHONPATH','') - ) - - - - - - - - - - - def install_site_py(self): - """Make sure there's a site.py in the target dir, if needed""" - - if self.sitepy_installed: - return # already did it, or don't need to - - sitepy = os.path.join(self.install_dir, "site.py") - source = resource_string("setuptools", "site-patch.py") - current = "" - - if os.path.exists(sitepy): - log.debug("Checking existing site.py in %s", self.install_dir) - current = open(sitepy,'rb').read() - if not current.startswith('def __boot():'): - print ("\n" - "***********************************************************************\n" - "Warning: %s is not a\n" - "setuptools-generated site.py. 
It will not be overwritten.\n" - "***********************************************************************\n" - ) % (sitepy,) - self.sitepy_installed = True - return - - if current != source: - log.info("Creating %s", sitepy) - if not self.dry_run: - ensure_directory(sitepy) - f = open(sitepy,'wb') - f.write(source) - f.close() - self.byte_compile([sitepy]) - - self.sitepy_installed = True - - - - - - - - - - - - - INSTALL_SCHEMES = dict( - posix = dict( - install_dir = '$base/lib/python$py_version_short/site-packages', - script_dir = '$base/bin', - ), - ) - - DEFAULT_SCHEME = dict( - install_dir = '$base/Lib/site-packages', - script_dir = '$base/Scripts', - ) - - def _expand(self, *attrs): - config_vars = self.get_finalized_command('install').config_vars - - if self.prefix: - # Set default install_dir/scripts from --prefix - config_vars = config_vars.copy() - config_vars['base'] = self.prefix - scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME) - for attr,val in scheme.items(): - if getattr(self,attr,None) is None: - setattr(self,attr,val) - - from distutils.util import subst_vars - for attr in attrs: - val = getattr(self, attr) - if val is not None: - val = subst_vars(val, config_vars) - if os.name == 'posix': - val = os.path.expanduser(val) - setattr(self, attr, val) - - - - - - - - - -def get_site_dirs(): - # return a list of 'site' dirs - sitedirs = filter(None,os.environ.get('PYTHONPATH','').split(os.pathsep)) - prefixes = [sys.prefix] - if sys.exec_prefix != sys.prefix: - prefixes.append(sys.exec_prefix) - for prefix in prefixes: - if prefix: - if sys.platform in ('os2emx', 'riscos'): - sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) - elif os.sep == '/': - sitedirs.extend([os.path.join(prefix, - "lib", - "python" + sys.version[:3], - "site-packages"), - os.path.join(prefix, "lib", "site-python")]) - else: - sitedirs.extend( - [prefix, os.path.join(prefix, "lib", "site-packages")] - ) - if sys.platform == 'darwin': - # for framework builds *only* we add the standard Apple - # locations. 
Currently only per-user, but /Library and - # /Network/Library could be added too - if 'Python.framework' in prefix: - home = os.environ.get('HOME') - if home: - sitedirs.append( - os.path.join(home, - 'Library', - 'Python', - sys.version[:3], - 'site-packages')) - for plat_specific in (0,1): - site_lib = get_python_lib(plat_specific) - if site_lib not in sitedirs: sitedirs.append(site_lib) - - sitedirs = map(normalize_path, sitedirs) - return sitedirs - - -def expand_paths(inputs): - """Yield sys.path directories that might contain "old-style" packages""" - - seen = {} - - for dirname in inputs: - dirname = normalize_path(dirname) - if dirname in seen: - continue - - seen[dirname] = 1 - if not os.path.isdir(dirname): - continue - - files = os.listdir(dirname) - yield dirname, files - - for name in files: - if not name.endswith('.pth'): - # We only care about the .pth files - continue - if name in ('easy-install.pth','setuptools.pth'): - # Ignore .pth files that we control - continue - - # Read the .pth file - f = open(os.path.join(dirname,name)) - lines = list(yield_lines(f)) - f.close() - - # Yield existing non-dupe, non-import directory lines from it - for line in lines: - if not line.startswith("import"): - line = normalize_path(line.rstrip()) - if line not in seen: - seen[line] = 1 - if not os.path.isdir(line): - continue - yield line, os.listdir(line) - - -def extract_wininst_cfg(dist_filename): - """Extract configuration data from a bdist_wininst .exe - - Returns a ConfigParser.RawConfigParser, or None - """ - f = open(dist_filename,'rb') - try: - endrec = zipfile._EndRecData(f) - if endrec is None: - return None - - prepended = (endrec[9] - endrec[5]) - endrec[6] - if prepended < 12: # no wininst data here - return None - f.seek(prepended-12) - - import struct, StringIO, ConfigParser - tag, cfglen, bmlen = struct.unpack("<iii",f.read(12)) - if tag not in (0x1234567A, 0x1234567B): - return None # not a valid tag - - f.seek(prepended-(12+cfglen)) - cfg = ConfigParser.RawConfigParser({'version':'','target_version':''}) - try: - cfg.readfp(StringIO.StringIO(f.read(cfglen).split(chr(0),1)[0])) - except ConfigParser.Error: - return None - if not cfg.has_section('metadata') or not cfg.has_section('Setup'): - return None - return cfg - - finally: - f.close() - - -def get_exe_prefixes(exe_filename): - """Get exe->egg path translations for a given .exe file""" - - prefixes = [ - ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), - ('PLATLIB/', ''), - ('SCRIPTS/', 'EGG-INFO/scripts/') - ] - z = zipfile.ZipFile(exe_filename) - try: - for info in z.infolist(): - name = info.filename - parts = name.split('/') - if len(parts)==3 and parts[2]=='PKG-INFO': - if parts[1].endswith('.egg-info'): - prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/')) - break - if len(parts)!=2 or not name.endswith('.pth'): - continue - if name.endswith('-nspkg.pth'): - continue - if parts[0].upper() in ('PURELIB','PLATLIB'): - for pth in yield_lines(z.read(name)): - pth = pth.strip().replace('\\','/') - if not pth.startswith('import'): - prefixes.append((('%s/%s/' % (parts[0],pth)), '')) - finally: - z.close() - prefixes = [(x.lower(),y) for x, y in prefixes] - prefixes.sort(); prefixes.reverse() - return prefixes - - -def parse_requirement_arg(spec): - try: - return Requirement.parse(spec) - except ValueError: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % (spec,) - ) - -class PthDistributions(Environment): - """A .pth file with Distribution paths in it""" - - dirty = False - - def __init__(self, filename, sitedirs=()): - self.filename = filename; self.sitedirs=map(normalize_path, sitedirs) - self.basedir = normalize_path(os.path.dirname(self.filename)) - self._load(); Environment.__init__(self, [], None, None) - for path in yield_lines(self.paths): - map(self.add, find_distributions(path, True)) - - def _load(self): - self.paths = [] - saw_import = False - seen = dict.fromkeys(self.sitedirs) - if os.path.isfile(self.filename): - for line in open(self.filename,'rt'):
- if line.startswith('import'): - saw_import = True - continue - path = line.rstrip() - self.paths.append(path) - if not path.strip() or path.strip().startswith('#'): - continue - # skip non-existent paths, in case somebody deleted a package - # manually, and duplicate paths as well - path = self.paths[-1] = normalize_path( - os.path.join(self.basedir,path) - ) - if not os.path.exists(path) or path in seen: - self.paths.pop() # skip it - self.dirty = True # we cleaned up, so we're dirty now :) - continue - seen[path] = 1 - - if self.paths and not saw_import: - self.dirty = True # ensure anything we touch has import wrappers - while self.paths and not self.paths[-1].strip(): - self.paths.pop() - - def save(self): - """Write changed .pth file back to disk""" - if not self.dirty: - return - - data = '\n'.join(map(self.make_relative,self.paths)) - if data: - log.debug("Saving %s", self.filename) - data = ( - "import sys; sys.__plen = len(sys.path)\n" - "%s\n" - "import sys; new=sys.path[sys.__plen:];" - " del sys.path[sys.__plen:];" - " p=getattr(sys,'__egginsert',len(os.environ.get('PYTHONPATH','').split(os.pathsep))); sys.path[p:p]=new;" - " sys.__egginsert = p+len(new)\n" - ) % data - - if os.path.islink(self.filename): - os.unlink(self.filename) - f = open(self.filename,'wb') - f.write(data); f.close() - - elif os.path.exists(self.filename): - log.debug("Deleting empty %s", self.filename) - os.unlink(self.filename) - - self.dirty = False - - def add(self,dist): - """Add `dist` to the distribution map""" - if dist.location not in self.paths and dist.location not in self.sitedirs: - self.paths.append(dist.location); self.dirty = True - Environment.add(self,dist) - - def remove(self,dist): - """Remove `dist` from the distribution map""" - while dist.location in self.paths: - self.paths.remove(dist.location); self.dirty = True - Environment.remove(self,dist) - - - def make_relative(self,path): - npath, last = os.path.split(normalize_path(path)) - baselen = len(self.basedir) - parts = [last] - sep = os.altsep=='/' and '/' or os.sep - while len(npath)>=baselen: - if npath==self.basedir: - parts.append(os.curdir) - parts.reverse() - return sep.join(parts) - npath, last = os.path.split(npath) - parts.append(last) - else: - return path - -def get_script_header(script_text, executable=sys_executable, wininst=False): - """Create a #! 
line, getting options (if any) from script_text""" - from distutils.command.build_scripts import first_line_re - first = (script_text+'\n').splitlines()[0] - match = first_line_re.match(first) - options = '' - if match: - options = match.group(1) or '' - if options: options = ' '+options - if wininst: - executable = "python.exe" - else: - executable = nt_quote_arg(executable) - hdr = "#!%(executable)s%(options)s\n" % locals() - if unicode(hdr,'ascii','ignore').encode('ascii') != hdr: - # Non-ascii path to sys.executable, use -x to prevent warnings - if options: - if options.strip().startswith('-'): - options = ' -x'+options.strip()[1:] - # else: punt, we can't do it, let the warning happen anyway - else: - options = ' -x' - executable = fix_jython_executable(executable, options) - hdr = "#!%(executable)s%(options)s\n" % locals() - return hdr - -def auto_chmod(func, arg, exc): - if func is os.remove and os.name=='nt': - chmod(arg, stat.S_IWRITE) - return func(arg) - exc = sys.exc_info() - raise exc[0], (exc[1][0], exc[1][1] + (" %s %s" % (func,arg))) - -def uncache_zipdir(path): - """Ensure that the importer caches dont have stale info for `path`""" - from zipimport import _zip_directory_cache as zdc - _uncache(path, zdc) - _uncache(path, sys.path_importer_cache) - -def _uncache(path, cache): - if path in cache: - del cache[path] - else: - path = normalize_path(path) - for p in cache: - if normalize_path(p)==path: - del cache[p] - return - -def is_python(text, filename=''): - "Is this string a valid Python script?" - try: - compile(text, filename, 'exec') - except (SyntaxError, TypeError): - return False - else: - return True - -def is_sh(executable): - """Determine if the specified executable is a .sh (contains a #! line)""" - try: - fp = open(executable) - magic = fp.read(2) - fp.close() - except (OSError,IOError): return executable - return magic == '#!' - -def nt_quote_arg(arg): - """Quote a command line argument according to Windows parsing rules""" - - result = [] - needquote = False - nb = 0 - - needquote = (" " in arg) or ("\t" in arg) - if needquote: - result.append('"') - - for c in arg: - if c == '\\': - nb += 1 - elif c == '"': - # double preceding backslashes, then add a \" - result.append('\\' * (nb*2) + '\\"') - nb = 0 - else: - if nb: - result.append('\\' * nb) - nb = 0 - result.append(c) - - if nb: - result.append('\\' * nb) - - if needquote: - result.append('\\' * nb) # double the trailing backslashes - result.append('"') - - return ''.join(result) - - - - - - - - - -def is_python_script(script_text, filename): - """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. - """ - if filename.endswith('.py') or filename.endswith('.pyw'): - return True # extension says it's Python - if is_python(script_text, filename): - return True # it's syntactically valid Python - if script_text.startswith('#!'): - # It begins with a '#!' 
line, so check if 'python' is in it somewhere - return 'python' in script_text.splitlines()[0].lower() - - return False # Not any Python I can recognize - -try: - from os import chmod as _chmod -except ImportError: - # Jython compatibility - def _chmod(*args): pass - -def chmod(path, mode): - log.debug("changing mode of %s to %o", path, mode) - try: - _chmod(path, mode) - except os.error, e: - log.debug("chmod failed: %s", e) - -def fix_jython_executable(executable, options): - if sys.platform.startswith('java') and is_sh(executable): - # Workaround Jython's sys.executable being a .sh (an invalid - # shebang line interpreter) - if options: - # Can't apply the workaround, leave it broken - log.warn("WARNING: Unable to adapt shebang line for Jython," - " the following script is NOT executable\n" - " see http://bugs.jython.org/issue1112 for" - " more information.") - else: - return '/usr/bin/env %s' % executable - return executable - - -def get_script_args(dist, executable=sys_executable, wininst=False, script_dir=None): - """Yield write_script() argument tuples for a distribution's entrypoints""" - spec = str(dist.as_requirement()) - requires = [spec] + [str(r) for r in dist.requires()] - header = get_script_header("", executable, wininst) - generated_by = "# generated by zetuptoolz %s" % (setuptools_version,) - - for group in 'console_scripts', 'gui_scripts': - for name, ep in dist.get_entry_map(group).items(): - script_head, script_tail = (( - "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n" - "%(generated_by)s\n" - "__requires__ = %(requires)r\n" - "import sys\n" - "from pkg_resources import load_entry_point\n" - "\n" - ) % locals(), ( - "sys.exit(\n" - " load_entry_point(%(spec)r, %(group)r, %(name)r)()\n" - ")\n" - ) % locals()) - - if wininst or sys.platform == "win32": - # On Windows/wininst, add a .py[w] extension. Delete any existing - # -script.py[w], .exe, and .exe.manifest. - if group=='gui_scripts': - ext = '.pyw' - old = ['','.pyw','-script.pyw','.exe','.exe.manifest'] - which_python = 'pythonw.exe' - new_header = re.sub('(?i)python.exe', which_python, header) - else: - ext = '.pyscript' - old = ['','.pyscript','.py','.pyc','.pyo','-script.py','.exe','.exe.manifest'] - which_python = 'python.exe' - new_header = re.sub('(?i)pythonw.exe', which_python, header) - - len_ext = len(ext) - script_head += ( - "# If this script doesn't work for you, make sure that the %(ext)s\n" - "# extension is included in the PATHEXT environment variable, and is\n" - "# associated with %(which_python)s in the registry.\n" - "\n" - "if sys.argv[0].endswith(%(ext)r):\n" - " sys.argv[0] = sys.argv[0][:-%(len_ext)r]\n" - "\n" - ) % locals() - - if os.path.exists(new_header[2:-1]) or sys.platform != 'win32': - hdr = new_header - else: - hdr = header - yield (name+ext, hdr + script_head + script_tail, 't', [name+x for x in old]) - - # Also write a shell script that runs the .pyscript, for cygwin. - # - # We can't use a Python script, because the Python interpreter that we want - # to use is the native Windows one, which won't understand a cygwin path. - # Windows paths written with forward slashes are universally understood - # (by native Python, cygwin Python, and bash), so we'll use 'cygpath -m' to - # get the directory from which the script was run in that form. This makes - # the cygwin script and .pyscript position-independent, provided they are - # in the same directory. 
- - def quote_path(s): - return "\\'".join("'" + p.replace('\\', '/') + "'" for p in s.split("'")) - - pyscript = quote_path("/"+name+ext) - python_path = quote_path(sys.executable) - shell_script_text = ( - '#!/bin/sh\n' - '%(generated_by)s\n' - '\n' - 'ScriptDir=`cygpath -m "$0/.."`\n' - '%(python_path)s "${ScriptDir}"%(pyscript)s "$@"\n' - ) % locals() - yield (name, shell_script_text, 'b') - else: - # On other platforms, we assume the right thing to do is to - # just write the stub with no extension. - yield (name, header + script_head + script_tail) - - -def rmtree(path, ignore_errors=False, onerror=auto_chmod): - """Recursively delete a directory tree. - - This code is taken from the Python 2.4 version of 'shutil', because - the 2.3 version doesn't really work right. - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - names = [] - try: - names = os.listdir(path) - except os.error, err: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error, err: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) - -def bootstrap(): - # This function is called when setuptools*.egg is run using /bin/sh - import setuptools; argv0 = os.path.dirname(setuptools.__path__[0]) - sys.argv[0] = argv0; sys.argv.append(argv0); main() - - -def main(argv=None, **kw): - from setuptools import setup - from setuptools.dist import Distribution - import distutils.core - - USAGE = """\ -usage: %(script)s [options] requirement_or_url ... 
- or: %(script)s --help -""" - - def gen_usage (script_name): - script = os.path.basename(script_name) - return USAGE % vars() - - def with_ei_usage(f): - old_gen_usage = distutils.core.gen_usage - try: - distutils.core.gen_usage = gen_usage - return f() - finally: - distutils.core.gen_usage = old_gen_usage - - class DistributionWithoutHelpCommands(Distribution): - common_usage = "" - def _show_help(self,*args,**kw): - with_ei_usage(lambda: Distribution._show_help(self,*args,**kw)) - - if argv is None: - argv = sys.argv[1:] - - with_ei_usage(lambda: - setup( - script_args = ['-q','easy_install', '-v']+argv, - script_name = sys.argv[0] or 'easy_install', - distclass=DistributionWithoutHelpCommands, **kw - ) - ) - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/egg_info.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/egg_info.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/egg_info.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/egg_info.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,451 +0,0 @@ -"""setuptools.command.egg_info - -Create a distribution's .egg-info directory and contents""" - -# This module should be kept compatible with Python 2.3 -import os, re -from setuptools import Command -from distutils.errors import * -from distutils import log -from setuptools.command.sdist import sdist -from distutils.util import convert_path -from distutils.filelist import FileList -from pkg_resources import parse_requirements, safe_name, parse_version, \ - safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename -from sdist import walk_revctrl - -class egg_info(Command): - description = "create a distribution's .egg-info directory" - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ('tag-svn-revision', 'r', - "Add subversion revision ID to version number"), - ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), - ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-svn-revision', 'R', - "Don't add subversion revision ID [default]"), - ('no-date', 'D', "Don't include date stamp [default]"), - ] - - boolean_options = ['tag-date', 'tag-svn-revision'] - negative_opt = {'no-svn-revision': 'tag-svn-revision', - 'no-date': 'tag-date'} - - - - - - - - def initialize_options(self): - self.egg_name = None - self.egg_version = None - self.egg_base = None - self.egg_info = None - self.tag_build = None - self.tag_svn_revision = 0 - self.tag_date = 0 - self.broken_egg_info = False - self.vtags = None - - def save_version_info(self, filename): - from setopt import edit_config - edit_config( - filename, - {'egg_info': - {'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()} - } - ) - - - - - - - - - - - - - - - - - - - - - - - def finalize_options (self): - self.egg_name = safe_name(self.distribution.get_name()) - self.vtags = self.tags() - self.egg_version = self.tagged_version() - - try: - list( - parse_requirements('%s==%s' % (self.egg_name,self.egg_version)) - ) - except ValueError: - raise DistutilsOptionError( - "Invalid distribution name or version syntax: %s-%s" % - (self.egg_name,self.egg_version) - ) - - if self.egg_base is None: - dirs = self.distribution.package_dir - self.egg_base = (dirs or {}).get('',os.curdir) - - self.ensure_dirname('egg_base') - self.egg_info = to_filename(self.egg_name)+'.egg-info' - if self.egg_base != os.curdir: - self.egg_info = os.path.join(self.egg_base, self.egg_info) - if '-' in self.egg_name: self.check_broken_egg_info() - - # Set package version for the benefit of dumber commands - # (e.g. sdist, bdist_wininst, etc.) - # - self.distribution.metadata.version = self.egg_version - - # If we bootstrapped around the lack of a PKG-INFO, as might be the - # case in a fresh checkout, make sure that any special tags get added - # to the version info - # - pd = self.distribution._patched_dist - if pd is not None and pd.key==self.egg_name.lower(): - pd._version = self.egg_version - pd._parsed_version = parse_version(self.egg_version) - self.distribution._patched_dist = None - - - def write_or_delete_file(self, what, filename, data, force=False): - """Write `data` to `filename` or delete if empty - - If `data` is non-empty, this routine is the same as ``write_file()``. - If `data` is empty but not ``None``, this is the same as calling - ``delete_file(filename)`. If `data` is ``None``, then this is a no-op - unless `filename` exists, in which case a warning is issued about the - orphaned file (if `force` is false), or deleted (if `force` is true). - """ - if data: - self.write_file(what, filename, data) - elif os.path.exists(filename): - if data is None and not force: - log.warn( - "%s not set in setup(), but %s exists", what, filename - ) - return - else: - self.delete_file(filename) - - def write_file(self, what, filename, data): - """Write `data` to `filename` (if not a dry run) after announcing it - - `what` is used in a log message to identify what is being written - to the file. 
- """ - log.info("writing %s to %s", what, filename) - if not self.dry_run: - f = open(filename, 'wb') - f.write(data) - f.close() - - def delete_file(self, filename): - """Delete `filename` (if not a dry run) after announcing it""" - log.info("deleting %s", filename) - if not self.dry_run: - os.unlink(filename) - - def tagged_version(self): - return safe_version(self.distribution.get_version() + self.vtags) - - def run(self): - self.mkpath(self.egg_info) - installer = self.distribution.fetch_build_egg - for ep in iter_entry_points('egg_info.writers'): - writer = ep.load(installer=installer) - writer(self, ep.name, os.path.join(self.egg_info,ep.name)) - - # Get rid of native_libs.txt if it was put there by older bdist_egg - nl = os.path.join(self.egg_info, "native_libs.txt") - if os.path.exists(nl): - self.delete_file(nl) - - self.find_sources() - - def tags(self): - version = '' - if self.tag_build: - version+=self.tag_build - if self.tag_svn_revision and ( - os.path.exists('.svn') or os.path.exists('PKG-INFO') - ): version += '-r%s' % self.get_svn_revision() - if self.tag_date: - import time; version += time.strftime("-%Y%m%d") - return version - - - - - - - - - - - - - - - - - - def get_svn_revision(self): - revision = 0 - urlre = re.compile('url="([^"]+)"') - revre = re.compile('committed-rev="(\d+)"') - - for base,dirs,files in os.walk(os.curdir): - if '.svn' not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove('.svn') - f = open(os.path.join(base,'.svn','entries')) - data = f.read() - f.close() - - if data.startswith('9 and d[9]]+[0]) - if base==os.curdir: - base_url = dirurl+'/' # save the root url - elif not dirurl.startswith(base_url): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - - return str(revision or get_pkg_info_revision()) - - - - - def find_sources(self): - """Generate SOURCES.txt manifest file""" - manifest_filename = os.path.join(self.egg_info,"SOURCES.txt") - mm = manifest_maker(self.distribution) - mm.manifest = manifest_filename - mm.run() - self.filelist = mm.filelist - - def check_broken_egg_info(self): - bei = self.egg_name+'.egg-info' - if self.egg_base != os.curdir: - bei = os.path.join(self.egg_base, bei) - if os.path.exists(bei): - log.warn( - "-"*78+'\n' - "Note: Your current .egg-info directory has a '-' in its name;" - '\nthis will not work correctly with "setup.py develop".\n\n' - 'Please rename %s to %s to correct this problem.\n'+'-'*78, - bei, self.egg_info - ) - self.broken_egg_info = self.egg_info - self.egg_info = bei # make it work for now - -class FileList(FileList): - """File list that accepts only existing, platform-independent paths""" - - def append(self, item): - if item.endswith('\r'): # Fix older sdists built on Windows - item = item[:-1] - path = convert_path(item) - if os.path.exists(path): - self.files.append(path) - - - - - - - - - -class manifest_maker(sdist): - - template = "MANIFEST.in" - - def initialize_options (self): - self.use_defaults = 1 - self.prune = 1 - self.manifest_only = 1 - self.force_manifest = 1 - - def finalize_options(self): - pass - - def run(self): - self.filelist = FileList() - if not os.path.exists(self.manifest): - self.write_manifest() # it must exist so it'll get in the list - self.filelist.findall() - self.add_defaults() - if os.path.exists(self.template): - self.read_template() - self.prune_file_list() - self.filelist.sort() - self.filelist.remove_duplicates() - self.write_manifest() - - def write_manifest 
(self): - """Write the file list in 'self.filelist' (presumably as filled in - by 'add_defaults()' and 'read_template()') to the manifest file - named by 'self.manifest'. - """ - files = self.filelist.files - if os.sep!='/': - files = [f.replace(os.sep,'/') for f in files] - self.execute(write_file, (self.manifest, files), - "writing manifest file '%s'" % self.manifest) - - def warn(self, msg): # suppress missing-file warnings from sdist - if not msg.startswith("standard file not found:"): - sdist.warn(self, msg) - - def add_defaults(self): - sdist.add_defaults(self) - self.filelist.append(self.template) - self.filelist.append(self.manifest) - rcfiles = list(walk_revctrl()) - if rcfiles: - self.filelist.extend(rcfiles) - elif os.path.exists(self.manifest): - self.read_manifest() - ei_cmd = self.get_finalized_command('egg_info') - self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) - - def prune_file_list (self): - build = self.get_finalized_command('build') - base_dir = self.distribution.get_fullname() - self.filelist.exclude_pattern(None, prefix=build.build_base) - self.filelist.exclude_pattern(None, prefix=base_dir) - sep = re.escape(os.sep) - self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1) - - -def write_file (filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. - """ - f = open(filename, "wb") # always write POSIX-style manifest - f.write("\n".join(contents)) - f.close() - - - - - - - - - - - - - -def write_pkg_info(cmd, basename, filename): - log.info("writing %s", filename) - if not cmd.dry_run: - metadata = cmd.distribution.metadata - metadata.version, oldver = cmd.egg_version, metadata.version - metadata.name, oldname = cmd.egg_name, metadata.name - try: - # write unescaped data to PKG-INFO, so older pkg_resources - # can still parse it - metadata.write_pkg_info(cmd.egg_info) - finally: - metadata.name, metadata.version = oldname, oldver - - safe = getattr(cmd.distribution,'zip_safe',None) - import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe) - -def warn_depends_obsolete(cmd, basename, filename): - if os.path.exists(filename): - log.warn( - "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." 
- ) - - -def write_requirements(cmd, basename, filename): - dist = cmd.distribution - data = ['\n'.join(yield_lines(dist.install_requires or ()))] - for extra,reqs in (dist.extras_require or {}).items(): - data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs)))) - cmd.write_or_delete_file("requirements", filename, ''.join(data)) - -def write_toplevel_names(cmd, basename, filename): - pkgs = dict.fromkeys( - [k.split('.',1)[0] - for k in cmd.distribution.iter_distribution_names() - ] - ) - cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n') - - - -def overwrite_arg(cmd, basename, filename): - write_arg(cmd, basename, filename, True) - -def write_arg(cmd, basename, filename, force=False): - argname = os.path.splitext(basename)[0] - value = getattr(cmd.distribution, argname, None) - if value is not None: - value = '\n'.join(value)+'\n' - cmd.write_or_delete_file(argname, filename, value, force) - -def write_entries(cmd, basename, filename): - ep = cmd.distribution.entry_points - - if isinstance(ep,basestring) or ep is None: - data = ep - elif ep is not None: - data = [] - for section, contents in ep.items(): - if not isinstance(contents,basestring): - contents = EntryPoint.parse_group(section, contents) - contents = '\n'.join(map(str,contents.values())) - data.append('[%s]\n%s\n\n' % (section,contents)) - data = ''.join(data) - - cmd.write_or_delete_file('entry points', filename, data, True) - -def get_pkg_info_revision(): - # See if we can get a -r### off of PKG-INFO, in case this is an sdist of - # a subversion revision - # - if os.path.exists('PKG-INFO'): - f = open('PKG-INFO','rU') - for line in f: - match = re.match(r"Version:.*-r(\d+)\s*$", line) - if match: - return int(match.group(1)) - return 0 - - - -# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -import setuptools, sys, glob -from distutils.command.install import install as _install -from distutils.errors import DistutilsArgError - -class install(_install): - """Use easy_install to install the package, w/dependencies""" - - user_options = _install.user_options + [ - ('old-and-unmanageable', None, "Try not to use this!"), - ('single-version-externally-managed', None, - "used by system package builders to create 'flat' eggs"), - ] - boolean_options = _install.boolean_options + [ - 'old-and-unmanageable', 'single-version-externally-managed', - ] - new_commands = [ - ('install_egg_info', lambda self: True), - ('install_scripts', lambda self: True), - ] - _nc = dict(new_commands) - sub_commands = [ - cmd for cmd in _install.sub_commands if cmd[0] not in _nc - ] + new_commands - - def initialize_options(self): - _install.initialize_options(self) - self.old_and_unmanageable = None - self.single_version_externally_managed = None - self.no_compile = None # make DISTUTILS_DEBUG work right! 
- - def finalize_options(self): - _install.finalize_options(self) - if self.root: - self.single_version_externally_managed = True - elif self.single_version_externally_managed: - if not self.root and not self.record: - raise DistutilsArgError( - "You must specify --record or --root when building system" - " packages" - ) - - def handle_extra_path(self): - if self.root or self.single_version_externally_managed: - # explicit backward-compatibility mode, allow extra_path to work - return _install.handle_extra_path(self) - - # Ignore extra_path when installing an egg (or being run by another - # command without --root or --single-version-externally-managed - self.path_file = None - self.extra_dirs = '' - - def run(self): - self.old_run() - if sys.platform == "win32": - from setuptools.command.scriptsetup import do_scriptsetup - do_scriptsetup() - - def old_run(self): - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return _install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(2) - caller_module = caller.f_globals.get('__name__','') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - _install.run(self) - else: - self.do_egg_install() - - def do_egg_install(self): - - easy_install = self.distribution.get_command_class('easy_install') - - cmd = easy_install( - self.distribution, args="x", root=self.root, record=self.record, - ) - cmd.ensure_finalized() # finalize before bdist_egg munges install cmd - cmd.always_copy_from = '.' 
# make sure local-dir eggs get installed - - # pick up setup-dir .egg files only: no .egg-info - cmd.package_index.scan(glob.glob('*.egg')) - - self.run_command('bdist_egg') - args = [self.distribution.get_command_obj('bdist_egg').egg_output] - - if setuptools.bootstrap_install_from: - # Bootstrap self-installation of setuptools - args.insert(0, setuptools.bootstrap_install_from) - - cmd.args = args - cmd.run() - setuptools.bootstrap_install_from = None - - - - - - - - - - - - - - - - - -# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_egg_info.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_egg_info.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_egg_info.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_egg_info.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -from setuptools import Command -from setuptools.archive_util import unpack_archive -from distutils import log, dir_util -import os, shutil, pkg_resources - -class install_egg_info(Command): - """Install an .egg-info directory for the package""" - - description = "Install an .egg-info directory for the package" - - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - def finalize_options(self): - self.set_undefined_options('install_lib',('install_dir','install_dir')) - ei_cmd = self.get_finalized_command("egg_info") - basename = pkg_resources.Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version - ).egg_name()+'.egg-info' - self.source = ei_cmd.egg_info - self.target = os.path.join(self.install_dir, basename) - self.outputs = [self.target] - - def run(self): - self.run_command('egg_info') - target = self.target - if os.path.isdir(self.target) and not os.path.islink(self.target): - dir_util.remove_tree(self.target, dry_run=self.dry_run) - elif os.path.exists(self.target): - self.execute(os.unlink,(self.target,),"Removing "+self.target) - if not self.dry_run: - pkg_resources.ensure_directory(self.target) - self.execute(self.copytree, (), - "Copying %s to %s" % (self.source, self.target) - ) - self.install_namespaces() - - def get_outputs(self): - return self.outputs - - def copytree(self): - # Copy the .egg-info tree to site-packages - def skimmer(src,dst): - # filter out source-control directories; note that 'src' is always - # a '/'-separated path, regardless of platform. 'dst' is a - # platform-specific path. - for skip in '.svn/','CVS/': - if src.startswith(skip) or '/'+skip in src: - return None - self.outputs.append(dst) - log.debug("Copying %s to %s", src, dst) - return dst - unpack_archive(self.source, self.target, skimmer) - - - - - - - - - - - - - - - - - - - - - - - - - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: return - filename,ext = os.path.splitext(self.target) - filename += '-nspkg.pth'; self.outputs.append(filename) - log.info("Installing %s",filename) - if not self.dry_run: - f = open(filename,'wb') - for pkg in nsp: - pth = tuple(pkg.split('.')) - trailer = '\n' - if '.' 
in pkg: - trailer = ( - "; m and setattr(sys.modules[%r], %r, m)\n" - % ('.'.join(pth[:-1]), pth[-1]) - ) - f.write( - "import sys,new,os; " - "p = os.path.join(sys._getframe(1).f_locals['sitedir'], " - "*%(pth)r); " - "ie = os.path.exists(os.path.join(p,'__init__.py')); " - "m = not ie and " - "sys.modules.setdefault(%(pkg)r,new.module(%(pkg)r)); " - "mp = (m or []) and m.__dict__.setdefault('__path__',[]); " - "(p not in mp) and mp.append(p)%(trailer)s" - % locals() - ) - f.close() - - def _get_all_ns_packages(self): - nsp = {} - for pkg in self.distribution.namespace_packages or []: - pkg = pkg.split('.') - while pkg: - nsp['.'.join(pkg)] = 1 - pkg.pop() - nsp=list(nsp) - nsp.sort() # set up shorter names first - return nsp - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_lib.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_lib.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_lib.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_lib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -from distutils.command.install_lib import install_lib as _install_lib -import os - -class install_lib(_install_lib): - """Don't add compiled flags to filenames of non-Python files""" - - def _bytecode_filenames (self, py_filenames): - bytecode_files = [] - for py_file in py_filenames: - if not py_file.endswith('.py'): - continue - if self.compile: - bytecode_files.append(py_file + "c") - if self.optimize > 0: - bytecode_files.append(py_file + "o") - - return bytecode_files - - def run(self): - self.build() - outfiles = self.install() - if outfiles is not None: - # always compile, in case we have any extension stubs to deal with - self.byte_compile(outfiles) - - def get_exclusions(self): - exclude = {} - nsp = self.distribution.namespace_packages - - if (nsp and self.get_finalized_command('install') - .single_version_externally_managed - ): - for pkg in nsp: - parts = pkg.split('.') - while parts: - pkgdir = os.path.join(self.install_dir, *parts) - for f in '__init__.py', '__init__.pyc', '__init__.pyo': - exclude[os.path.join(pkgdir,f)] = 1 - parts.pop() - return exclude - - def copy_tree( - self, infile, outfile, - preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 - ): - assert preserve_mode and preserve_times and not preserve_symlinks - exclude = self.get_exclusions() - - if not exclude: - return _install_lib.copy_tree(self, infile, outfile) - - # Exclude namespace package __init__.py* files from the output - - from setuptools.archive_util import unpack_directory - from distutils import log - - outfiles = [] - - def pf(src, dst): - if dst in exclude: - log.warn("Skipping installation of %s (namespace package)",dst) - return False - - log.info("copying %s -> %s", src, os.path.dirname(dst)) - outfiles.append(dst) - return dst - - unpack_directory(infile, outfile, pf) - return outfiles - - def get_outputs(self): - outputs = _install_lib.get_outputs(self) - exclude = self.get_exclusions() - if exclude: - return [f for f in outputs if f not in exclude] - return outputs diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_scripts.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_scripts.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/install_scripts.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/install_scripts.py 
1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -from distutils.command.install_scripts import install_scripts \ - as _install_scripts -from easy_install import get_script_args, sys_executable, chmod -from pkg_resources import Distribution, PathMetadata, ensure_directory -import os -from distutils import log - -class install_scripts(_install_scripts): - """Do normal script install, plus any egg_info wrapper scripts""" - - def initialize_options(self): - _install_scripts.initialize_options(self) - self.no_ep = False - - def run(self): - self.run_command("egg_info") - if self.distribution.scripts: - _install_scripts.run(self) # run first to set up self.outfiles - else: - self.outfiles = [] - if self.no_ep: - # don't install entry point scripts into .egg file! - return - - ei_cmd = self.get_finalized_command("egg_info") - dist = Distribution( - ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), - ei_cmd.egg_name, ei_cmd.egg_version, - ) - bs_cmd = self.get_finalized_command('build_scripts') - executable = getattr(bs_cmd,'executable',sys_executable) - is_wininst = getattr( - self.get_finalized_command("bdist_wininst"), '_is_running', False - ) - for args in get_script_args(dist, executable, is_wininst): - self.write_script(*args) - - - - - - def write_script(self, script_name, contents, mode="t", *ignored): - """Write an executable file to the scripts directory""" - log.info("Installing %s script to %s", script_name, self.install_dir) - target = os.path.join(self.install_dir, script_name) - self.outfiles.append(target) - - if not self.dry_run: - ensure_directory(target) - f = open(target,"w"+mode) - f.write(contents) - f.close() - chmod(target,0755) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/register.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/register.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/register.py 2012-05-14 02:07:18.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/register.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -from distutils.command.register import register as _register - -class register(_register): - __doc__ = _register.__doc__ - - def run(self): - # Make sure that we are using valid current name/version info - self.run_command('egg_info') - _register.run(self) - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/rotate.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/rotate.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/rotate.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/rotate.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -import distutils, os -from setuptools import Command -from distutils.util import convert_path -from distutils import log -from distutils.errors import * - -class rotate(Command): - """Delete older distributions""" - - description = "delete older distributions, keeping N newest files" - user_options = [ - ('match=', 'm', "patterns to match (required)"), - ('dist-dir=', 'd', "directory where the distributions are"), - ('keep=', 'k', "number of matching distributions to keep"), - ] - - boolean_options = [] - - def initialize_options(self): - self.match = None - self.dist_dir = None - self.keep = None - - def finalize_options(self): - if self.match is None: - raise DistutilsOptionError( - "Must specify one or more (comma-separated) 
match patterns " - "(e.g. '.zip' or '.egg')" - ) - if self.keep is None: - raise DistutilsOptionError("Must specify number of files to keep") - try: - self.keep = int(self.keep) - except ValueError: - raise DistutilsOptionError("--keep must be an integer") - if isinstance(self.match, basestring): - self.match = [ - convert_path(p.strip()) for p in self.match.split(',') - ] - self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) - - def run(self): - self.run_command("egg_info") - from glob import glob - for pattern in self.match: - pattern = self.distribution.get_name()+'*'+pattern - files = glob(os.path.join(self.dist_dir,pattern)) - files = [(os.path.getmtime(f),f) for f in files] - files.sort() - files.reverse() - - log.info("%d file(s) matching %s", len(files), pattern) - files = files[self.keep:] - for (t,f) in files: - log.info("Deleting %s", f) - if not self.dry_run: - os.unlink(f) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/saveopts.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/saveopts.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/saveopts.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/saveopts.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -import distutils, os -from setuptools import Command -from setuptools.command.setopt import edit_config, option_base - -class saveopts(option_base): - """Save command-line options to a file""" - - description = "save supplied options to setup.cfg or other config file" - - def run(self): - dist = self.distribution - commands = dist.command_options.keys() - settings = {} - - for cmd in commands: - - if cmd=='saveopts': - continue # don't save our own options! - - for opt,(src,val) in dist.get_option_dict(cmd).items(): - if src=="command line": - settings.setdefault(cmd,{})[opt] = val - - edit_config(self.filename, settings, self.dry_run) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/scriptsetup.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/scriptsetup.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/scriptsetup.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/scriptsetup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,284 +0,0 @@ -from distutils.errors import DistutilsSetupError -from setuptools import Command -import sys - -class scriptsetup(Command): - action = (sys.platform == "win32" - and "set up .pyscript association and PATHEXT variable to run scripts" - or "this does nothing on non-Windows platforms") - - user_options = [ - ('allusers', 'a', - 'make changes for all users of this Windows installation (requires Administrator privileges)'), - ] - boolean_options = ['allusers'] - - def initialize_options(self): - self.allusers = False - - def finalize_options(self): - pass - - def run(self): - if sys.platform != "win32": - print "\n'scriptsetup' isn't needed on non-Windows platforms." - else: - do_scriptsetup(self.allusers) - - -def do_scriptsetup(allusers=False): - print "\nSetting up environment to run scripts for %s..." 
% (allusers and "all users" or "the current user") - - from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_CLASSES_ROOT, \ - REG_SZ, REG_EXPAND_SZ, KEY_QUERY_VALUE, KEY_SET_VALUE, \ - OpenKey, CreateKey, QueryValueEx, SetValueEx, FlushKey, CloseKey - - USER_ENV = "Environment" - try: - user_env = OpenKey(HKEY_CURRENT_USER, USER_ENV, 0, KEY_QUERY_VALUE) - except WindowsError, e: - raise DistutilsSetupError("I could not read the user environment from the registry.\n%r" % (e,)) - - SYSTEM_ENV = "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment" - try: - system_env = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_ENV, 0, KEY_QUERY_VALUE) - except WindowsError, e: - raise DistutilsSetupError("I could not read the system environment from the registry.\n%r" % (e,)) - - - # HKEY_CLASSES_ROOT is a merged view that would only confuse us. - # - - USER_CLASSES = "SOFTWARE\\Classes" - try: - user_classes = OpenKey(HKEY_CURRENT_USER, USER_CLASSES, 0, KEY_QUERY_VALUE) - except WindowsError, e: - raise DistutilsSetupError("I could not read the user filetype associations from the registry.\n%r" % (e,)) - - SYSTEM_CLASSES = "SOFTWARE\\Classes" - try: - system_classes = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_CLASSES, 0, KEY_QUERY_VALUE) - except WindowsError, e: - raise DistutilsSetupError("I could not read the system filetype associations from the registry.\n%r" % (e,)) - - - def query(key, subkey, what): - try: - (value, type) = QueryValueEx(key, subkey) - except WindowsError, e: - if e.winerror == 2: # not found - return None - raise DistutilsSetupError("I could not read %s from the registry.\n%r" % (what, e)) - - # It does not matter that we don't expand environment strings, in fact it's better not to. - - if type != REG_SZ and type != REG_EXPAND_SZ: - raise DistutilsSetupError("I expected the registry entry for %s to have a string type (REG_SZ or REG_EXPAND_SZ), " - "and was flummoxed by it having type code %r." % (what, type)) - return (value, type) - - - def open_and_query(key, path, subkey, what): - try: - read_key = OpenKey(key, path, 0, KEY_QUERY_VALUE) - except WindowsError, e: - if e.winerror == 2: # not found - return None - raise DistutilsSetupError("I could not read %s from the registry because I could not open " - "the parent key.\n%r" % (what, e)) - - try: - return query(read_key, subkey, what) - finally: - CloseKey(read_key) - - - def update(key_name_path, subkey, desired_value, desired_type, goal, what): - (key, name, path) = key_name_path - - (old_value, old_type) = open_and_query(key, path, subkey, what) or (None, None) - if (old_value, old_type) == (desired_value, desired_type): - print "Already done: %s." 
% (goal,) - return False - - try: - update_key = OpenKey(key, path, 0, KEY_SET_VALUE|KEY_QUERY_VALUE) - except WindowsError, e: - if e.winerror != 2: - raise DistutilsSetupError("I tried to %s, but was not successful because I could not open " - "the registry key %s\\%s for writing.\n%r" - % (goal, name, path, e)) - try: - update_key = CreateKey(key, path) - except WindowsError, e: - raise DistutilsSetupError("I tried to %s, but was not successful because the registry key %s\\%s " - "did not exist, and I was unable to create it.\n%r" - % (goal, name, path, e)) - - (new_value, new_type) = (None, None) - try: - SetValueEx(update_key, subkey, 0, desired_type, desired_value) - except WindowsError, e: - raise DistutilsSetupError("I tried to %s, but was not able to set the subkey %r under %s\\%s to be %r.\n%r" - % (goal, subkey, name, path, desired_value)) - else: - (new_value, new_type) = query(update_key, subkey, what) or (None, None) - finally: - FlushKey(update_key) - CloseKey(update_key) - - if (new_value, new_type) != (desired_value, desired_type): - raise DistutilsSetupError("I tried to %s by setting the subkey %r under %s\\%s to be %r, " - "and the call to SetValueEx succeeded, but the value ended up as " - "%r instead (it was previously %r). Maybe the update was unexpectedly virtualized?" - % (goal, subkey, name, path, desired_value, new_value, old_value)) - - print "Done: %s." % (goal,) - return True - - - # Maintenance hazard: 'add_to_environment' and 'associate' use very similar, but not identical logic. - - def add_to_environment(varname, addition, change_allusers): - changed = False - what = "the %s environment variable %s" % (change_allusers and "system" or "user", varname) - goal = "add %s to %s" % (addition, what) - - system_valueandtype = query(system_env, varname, "the system environment variable %s" % (varname,)) - user_valueandtype = query(user_env, varname, "the user environment variable %s" % (varname,)) - - if change_allusers: - (value, type) = system_valueandtype or (u'', REG_SZ) - key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", SYSTEM_ENV) - else: - (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) - key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV) - - if addition.lower() in value.lower().split(u';'): - print "Already done: %s." % (goal,) - else: - changed |= update(key_name_path, varname, value + u';' + addition, type, goal, what) - - if change_allusers: - # Also change any overriding environment entry for the current user. 
- (user_value, user_type) = user_valueandtype or (u'', REG_SZ) - split_value = user_value.lower().split(u';') - - if not (addition.lower() in split_value or u'%'+varname.lower()+u'%' in split_value): - now_what = "the overriding user environment variable %s" % (varname,) - changed |= update((HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV), - varname, user_value + u';' + addition, user_type, - "add %s to %s" % (addition, now_what), now_what) - - return changed - - - def associate(ext, target, change_allusers): - changed = False - what = "the %s association for %s" % (change_allusers and "system" or "user", ext) - goal = "associate the filetype %s with %s for %s" % (ext, target, change_allusers and "all users" or "the current user") - - try: - if change_allusers: - target_key = OpenKey(HKEY_LOCAL_MACHINE, "%s\\%s" % (SYSTEM_CLASSES, target), 0, KEY_QUERY_VALUE) - else: - target_key = OpenKey(HKEY_CLASSES_ROOT, target, 0, KEY_QUERY_VALUE) - except WindowsError, e: - raise DistutilsSetupError("I was going to %s, but that won't work because the %s class does not exist in the registry, " - "as far as I can tell.\n%r" % (goal, target, e)) - CloseKey(target_key) - - system_key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", "%s\\%s" % (SYSTEM_CLASSES, ext)) - user_key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", "%s\\%s" % (USER_CLASSES, ext)) - - system_valueandtype = open_and_query(system_classes, ext, "", "the system association for %s" % (ext,)) - user_valueandtype = open_and_query(user_classes, ext, "", "the user association for %s" % (ext,)) - - if change_allusers: - (value, type) = system_valueandtype or (u'', REG_SZ) - key_name_path = system_key_name_path - else: - (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) - key_name_path = user_key_name_path - - if value == target: - print "Already done: %s." % (goal,) - else: - changed |= update(key_name_path, "", unicode(target), REG_SZ, goal, what) - - if change_allusers: - # Also change any overriding association for the current user. - (user_value, user_type) = user_valueandtype or (u'', REG_SZ) - - if user_value != target: - changed |= update(user_key_name_path, "", unicode(target), REG_SZ, - "associate the filetype %s with %s for the current user " \ - "(because the system association is overridden)" % (ext, target), - "the overriding user association for %s" % (ext,)) - - return changed - - - def broadcast_settingchange(change_allusers): - print "Broadcasting that the environment has changed, please wait..." 
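[Annotation: the duplicate check at the heart of add_to_environment() above is case-insensitive membership in a ";"-separated list. A pure-Python sketch of just that step, runnable without any registry access; the guard against an empty existing value is an addition here, since the deleted code appends the separator unconditionally:

    def append_pathext(value, addition):
        # mirror the check in add_to_environment(): compare case-
        # insensitively against each existing ;-separated entry
        if addition.lower() in value.lower().split(u';'):
            return value            # already present, nothing to change
        return value + u';' + addition if value else addition

    # append_pathext(u'.COM;.EXE;.BAT', u'.pyscript')
    # -> u'.COM;.EXE;.BAT;.pyscript'
]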
- - # - # - # LRESULT WINAPI SendMessageTimeoutW(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam, - # UINT fuFlags, UINT uTimeout, PDWORD_PTR lpdwResult); - - try: - from ctypes import WINFUNCTYPE, POINTER, windll, addressof, c_wchar_p - from ctypes.wintypes import LONG, HWND, UINT, WPARAM, LPARAM, DWORD - - SendMessageTimeout = WINFUNCTYPE(POINTER(LONG), HWND, UINT, WPARAM, LPARAM, UINT, UINT, POINTER(POINTER(DWORD))) \ - (("SendMessageTimeoutW", windll.user32)) - HWND_BROADCAST = 0xFFFF - WM_SETTINGCHANGE = 0x001A - SMTO_ABORTIFHUNG = 0x0002 - SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, change_allusers and 1 or 0, - addressof(c_wchar_p(u"Environment")), SMTO_ABORTIFHUNG, 5000, None); - except Exception, e: - print "Warning: %r" % (e,) - - - changed_assoc = associate(".pyscript", "Python.File", allusers) - - changed_env = False - try: - changed_env |= add_to_environment("PATHEXT", ".pyscript", allusers) - changed_env |= add_to_environment("PATHEXT", ".pyw", allusers) - finally: - CloseKey(user_env) - CloseKey(system_env) - - if changed_assoc or changed_env: - broadcast_settingchange(allusers) - - if changed_env: - # whether logout is needed seems to randomly differ between installations - # of XP, but it is not needed in Vista or later. - try: - import platform, re - need_logout = not re.search(r'^[6-9]|([1-9][0-9]+)\.', platform.version()) - except Exception, e: - e # hush pyflakes - need_logout = True - - if need_logout: - print """ -*********************************************************************** -Changes have been made to the persistent environment, but they may not -take effect in this Windows session. Running installed Python scripts -from a Command Prompt may only work after you have logged out and back -in again, or rebooted. -*********************************************************************** -""" - else: - print """ -*********************************************************************** -Changes have been made to the persistent environment, but not in this -Command Prompt. Running installed Python scripts will only work from -new Command Prompts opened from now on. 
-*********************************************************************** -""" diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/sdist.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/sdist.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/sdist.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/sdist.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -from distutils.command.sdist import sdist as _sdist -from distutils.util import convert_path -from distutils import log -from glob import glob -import os, re, sys, pkg_resources - -entities = [ - ("<","<"), (">", ">"), (""", '"'), ("'", "'"), - ("&", "&") -] - -def unescape(data): - for old,new in entities: - data = data.replace(old,new) - return data - -def re_finder(pattern, postproc=None): - def find(dirname, filename): - f = open(filename,'rU') - data = f.read() - f.close() - for match in pattern.finditer(data): - path = match.group(1) - if postproc: - path = postproc(path) - yield joinpath(dirname,path) - return find - -def joinpath(prefix,suffix): - if not prefix: - return suffix - return os.path.join(prefix,suffix) - - - - - - - - - -def walk_revctrl(dirname=''): - """Find all files under revision control""" - for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): - for item in ep.load()(dirname): - yield item - -def _default_revctrl(dirname=''): - for path, finder in finders: - path = joinpath(dirname,path) - if os.path.isfile(path): - for path in finder(dirname,path): - if os.path.isfile(path): - yield path - elif os.path.isdir(path): - for item in _default_revctrl(path): - yield item - -def externals_finder(dirname, filename): - """Find any 'svn:externals' directories""" - found = False - f = open(filename,'rb') - for line in iter(f.readline, ''): # can't use direct iter! 
- parts = line.split() - if len(parts)==2: - kind,length = parts - data = f.read(int(length)) - if kind=='K' and data=='svn:externals': - found = True - elif kind=='V' and found: - f.close() - break - else: - f.close() - return - - for line in data.splitlines(): - parts = line.split() - if parts: - yield joinpath(dirname, parts[0]) - - -entries_pattern = re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I) - -def entries_finder(dirname, filename): - f = open(filename,'rU') - data = f.read() - f.close() - if data.startswith('=6 and record[5]=="delete": - continue # skip deleted - yield joinpath(dirname, record[0]) - - -finders = [ - (convert_path('CVS/Entries'), - re_finder(re.compile(r"^\w?/([^/]+)/", re.M))), - (convert_path('.svn/entries'), entries_finder), - (convert_path('.svn/dir-props'), externals_finder), - (convert_path('.svn/dir-prop-base'), externals_finder), # svn 1.4 -] - - - - - - - - - - - - -class sdist(_sdist): - """Smart sdist that finds anything supported by revision control""" - - user_options = [ - ('formats=', None, - "formats for source distribution (comma-separated list)"), - ('keep-temp', 'k', - "keep the distribution tree around after creating " + - "archive file(s)"), - ('dist-dir=', 'd', - "directory to put the source distribution archive(s) in " - "[default: dist]"), - ] - - negative_opt = {} - - def run(self): - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - self.filelist = ei_cmd.filelist - self.filelist.append(os.path.join(ei_cmd.egg_info,'SOURCES.txt')) - self.check_readme() - self.check_metadata() - self.make_distribution() - - dist_files = getattr(self.distribution,'dist_files',[]) - for file in self.archive_files: - data = ('sdist', '', file) - if data not in dist_files: - dist_files.append(data) - - def read_template(self): - try: - _sdist.read_template(self) - except: - # grody hack to close the template file (MANIFEST.in) - # this prevents easy_install's attempt at deleting the file from - # dying and thus masking the real error - sys.exc_info()[2].tb_next.tb_frame.f_locals['template'].close() - raise - - # Cribbed from old distutils code, to work around new distutils code - # that tries to do some of the same stuff as we do, in a way that makes - # us loop. 
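[Annotation: walk_revctrl() above treats file discovery as a plugin protocol: any callable registered under the 'setuptools.file_finders' entry-point group, taking a directory name and yielding tracked paths, contributes to the sdist file list. The deleted code ships only CVS and Subversion finders; as an illustration of the same protocol, a hypothetical git-based finder might look like this (Python 2.7, where subprocess.check_output returns a str):

    import os, subprocess

    def git_file_finder(dirname=''):
        # `git ls-files` prints tracked paths relative to the work tree
        try:
            out = subprocess.check_output(['git', 'ls-files'],
                                          cwd=dirname or '.')
        except (OSError, subprocess.CalledProcessError):
            return  # not a git checkout: contribute nothing
        for name in out.splitlines():
            yield os.path.join(dirname, name)
]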
- - def add_defaults (self): - standards = [('README', 'README.txt'), self.distribution.script_name] - - for fn in standards: - if type(fn) is tuple: - alts = fn - got_it = 0 - for fn in alts: - if os.path.exists(fn): - got_it = 1 - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if os.path.exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - optional = ['test/test*.py', 'setup.cfg'] - - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - if files: - self.filelist.extend(files) - - if self.distribution.has_pure_modules(): - build_py = self.get_finalized_command('build_py') - self.filelist.extend(build_py.get_source_files()) - - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - - def check_readme(self): - alts = ("README", "README.txt") - for f in alts: - if os.path.exists(f): - return - else: - self.warn( - "standard file not found: should have one of " +', '.join(alts) - ) - - - def make_release_tree(self, base_dir, files): - _sdist.make_release_tree(self, base_dir, files) - - # Save any egg_info command line options used to create this sdist - dest = os.path.join(base_dir, 'setup.cfg') - if hasattr(os,'link') and os.path.exists(dest): - # unlink and re-copy, since it might be hard-linked, and - # we don't want to change the source version - os.unlink(dest) - self.copy_file('setup.cfg', dest) - - self.get_finalized_command('egg_info').save_version_info(dest) - - - - - - - - -# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/setopt.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/setopt.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/setopt.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/setopt.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -import distutils, os -from setuptools import Command -from distutils.util import convert_path -from distutils import log -from distutils.errors import * - -__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] - - -def config_file(kind="local"): - """Get the filename of the distutils, local, global, or per-user config - - `kind` must be one of "local", "global", or "user" - """ - if kind=='local': - return 'setup.cfg' - if kind=='global': - return os.path.join( - os.path.dirname(distutils.__file__),'distutils.cfg' - ) - if kind=='user': - dot = os.name=='posix' and '.' or '' - return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) - raise ValueError( - "config_file() type must be 'local', 'global', or 'user'", kind - ) - - - - - - - - - - - - - - - -def edit_config(filename, settings, dry_run=False): - """Edit a configuration file to include `settings` - - `settings` is a dictionary of dictionaries or ``None`` values, keyed by - command/section name. A ``None`` value means to delete the entire section, - while a dictionary lists settings to be changed or deleted in that section. - A setting of ``None`` means to delete that setting. 
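[Annotation: the edit_config() docstring above fully specifies the deletion semantics; a short usage sketch, with a hypothetical file name and values:

    edit_config('setup.cfg', {
        'easy_install': {'find_links': 'https://example.org/links/'},
        'bdist_egg': None,           # None for a section: delete it entirely
        'sdist': {'formats': None},  # None for an option: delete that key
    })
]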
- """ - from ConfigParser import RawConfigParser - log.debug("Reading configuration from %s", filename) - opts = RawConfigParser() - opts.read([filename]) - for section, options in settings.items(): - if options is None: - log.info("Deleting section [%s] from %s", section, filename) - opts.remove_section(section) - else: - if not opts.has_section(section): - log.debug("Adding new section [%s] to %s", section, filename) - opts.add_section(section) - for option,value in options.items(): - if value is None: - log.debug("Deleting %s.%s from %s", - section, option, filename - ) - opts.remove_option(section,option) - if not opts.options(section): - log.info("Deleting empty [%s] section from %s", - section, filename) - opts.remove_section(section) - else: - log.debug( - "Setting %s.%s to %r in %s", - section, option, value, filename - ) - opts.set(section,option,value) - - log.info("Writing %s", filename) - if not dry_run: - f = open(filename,'w'); opts.write(f); f.close() - -class option_base(Command): - """Abstract base class for commands that mess with config files""" - - user_options = [ - ('global-config', 'g', - "save options to the site-wide distutils.cfg file"), - ('user-config', 'u', - "save options to the current user's pydistutils.cfg file"), - ('filename=', 'f', - "configuration file to use (default=setup.cfg)"), - ] - - boolean_options = [ - 'global-config', 'user-config', - ] - - def initialize_options(self): - self.global_config = None - self.user_config = None - self.filename = None - - def finalize_options(self): - filenames = [] - if self.global_config: - filenames.append(config_file('global')) - if self.user_config: - filenames.append(config_file('user')) - if self.filename is not None: - filenames.append(self.filename) - if not filenames: - filenames.append(config_file('local')) - if len(filenames)>1: - raise DistutilsOptionError( - "Must specify only one configuration file option", - filenames - ) - self.filename, = filenames - - - - -class setopt(option_base): - """Save command-line options to a file""" - - description = "set an option in setup.cfg or another config file" - - user_options = [ - ('command=', 'c', 'command to set an option for'), - ('option=', 'o', 'option to set'), - ('set-value=', 's', 'value of the option'), - ('remove', 'r', 'remove (unset) the value'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.command = None - self.option = None - self.set_value = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.command is None or self.option is None: - raise DistutilsOptionError("Must specify --command *and* --option") - if self.set_value is None and not self.remove: - raise DistutilsOptionError("Must specify --set-value or --remove") - - def run(self): - edit_config( - self.filename, { - self.command: {self.option.replace('-','_'):self.set_value} - }, - self.dry_run - ) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/test.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/test.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/test.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -from setuptools import Command -from distutils.errors import DistutilsOptionError -import sys -from pkg_resources import * -from 
unittest import TestLoader, main - -class ScanningLoader(TestLoader): - - def loadTestsFromModule(self, module): - """Return a suite of all tests cases contained in the given module - - If the module is a package, load tests from all the modules in it. - If the module has an ``additional_tests`` function, call it and add - the return value to the tests. - """ - tests = [] - if module.__name__!='setuptools.tests.doctest': # ugh - tests.append(TestLoader.loadTestsFromModule(self,module)) - - if hasattr(module, "additional_tests"): - tests.append(module.additional_tests()) - - if hasattr(module, '__path__'): - for file in resource_listdir(module.__name__, ''): - if file.endswith('.py') and file!='__init__.py': - submodule = module.__name__+'.'+file[:-3] - else: - if resource_exists( - module.__name__, file+'/__init__.py' - ): - submodule = module.__name__+'.'+file - else: - continue - tests.append(self.loadTestsFromName(submodule)) - - if len(tests)!=1: - return self.suiteClass(tests) - else: - return tests[0] # don't create a nested suite for only one return - - -class test(Command): - """Command to run unit tests after in-place build""" - - description = "run unit tests after in-place build" - - user_options = [ - ('test-module=','m', "Run 'test_suite' in specified module"), - ('test-suite=','s', - "Test suite to run (e.g. 'some_module.test_suite')"), - ('test-runner=','r', "Test runner to use"), - ] - - def initialize_options(self): - self.test_runner = None - self.test_suite = None - self.test_module = None - self.test_loader = None - - def finalize_options(self): - if self.test_suite is None: - if self.test_module is None: - self.test_suite = self.distribution.test_suite - else: - self.test_suite = self.test_module+".test_suite" - elif self.test_module: - raise DistutilsOptionError( - "You may specify a module or a suite, but not both" - ) - - self.test_args = [self.test_suite] - - if self.verbose: - self.test_args.insert(0,'--verbose') - if self.test_loader is None: - self.test_loader = getattr(self.distribution,'test_loader',None) - if self.test_loader is None: - self.test_loader = "setuptools.command.test:ScanningLoader" - if self.test_runner is None: - self.test_runner = getattr(self.distribution,'test_runner',None) - - - def with_project_on_sys_path(self, func): - # Ensure metadata is up-to-date - self.run_command('egg_info') - - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - - ei_cmd = self.get_finalized_command("egg_info") - - old_path = sys.path[:] - old_modules = sys.modules.copy() - - try: - sys.path.insert(0, normalize_path(ei_cmd.egg_base)) - working_set.__init__() - add_activation_listener(lambda dist: dist.activate()) - require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) - func() - finally: - sys.path[:] = old_path - sys.modules.clear() - sys.modules.update(old_modules) - working_set.__init__() - - - def run(self): - if self.distribution.install_requires: - self.distribution.fetch_build_eggs(self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs(self.distribution.tests_require) - - if self.test_suite: - cmd = ' '.join(self.test_args) - if self.dry_run: - self.announce('skipping "unittest %s" (dry run)' % cmd) - else: - self.announce('running "unittest %s"' % cmd) - self.with_project_on_sys_path(self.run_tests) - - - def run_tests(self): - import unittest - loader_ep = EntryPoint.parse("x="+self.test_loader) - loader_class = 
loader_ep.load(require=False) - kw = {} - if self.test_runner is not None: - runner_ep = EntryPoint.parse("x="+self.test_runner) - runner_class = runner_ep.load(require=False) - kw['testRunner'] = runner_class() - unittest.main( - None, None, [unittest.__file__]+self.test_args, - testLoader = loader_class(), **kw - ) - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/upload.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/upload.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/command/upload.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/command/upload.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -"""distutils.command.upload - -Implements the Distutils 'upload' subcommand (upload package to PyPI).""" - -from distutils.errors import * -from distutils.core import Command -from distutils.spawn import spawn -from distutils import log -try: - from hashlib import md5 -except ImportError: - from md5 import md5 -import os -import socket -import platform -import ConfigParser -import httplib -import base64 -import urlparse -import cStringIO as StringIO - -class upload(Command): - - description = "upload binary package to PyPI" - - DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi' - - user_options = [ - ('repository=', 'r', - "url of repository [default: %s]" % DEFAULT_REPOSITORY), - ('show-response', None, - 'display full response text from server'), - ('sign', 's', - 'sign files to upload using gpg'), - ('identity=', 'i', 'GPG identity used to sign files'), - ] - boolean_options = ['show-response', 'sign'] - - def initialize_options(self): - self.username = '' - self.password = '' - self.repository = '' - self.show_response = 0 - self.sign = False - self.identity = None - - def finalize_options(self): - if self.identity and not self.sign: - raise DistutilsOptionError( - "Must use --sign for --identity to have meaning" - ) - if os.environ.has_key('HOME'): - rc = os.path.join(os.environ['HOME'], '.pypirc') - if os.path.exists(rc): - self.announce('Using PyPI login from %s' % rc) - config = ConfigParser.ConfigParser({ - 'username':'', - 'password':'', - 'repository':''}) - config.read(rc) - if not self.repository: - self.repository = config.get('server-login', 'repository') - if not self.username: - self.username = config.get('server-login', 'username') - if not self.password: - self.password = config.get('server-login', 'password') - if not self.repository: - self.repository = self.DEFAULT_REPOSITORY - - def run(self): - if not self.distribution.dist_files: - raise DistutilsOptionError("No dist file created in earlier command") - for command, pyversion, filename in self.distribution.dist_files: - self.upload_file(command, pyversion, filename) - - def upload_file(self, command, pyversion, filename): - # Sign if requested - if self.sign: - gpg_args = ["gpg", "--detach-sign", "-a", filename] - if self.identity: - gpg_args[2:2] = ["--local-user", self.identity] - spawn(gpg_args, - dry_run=self.dry_run) - - # Fill in the data - content = open(filename,'rb').read() - basename = os.path.basename(filename) - comment = '' - if command=='bdist_egg' and self.distribution.has_ext_modules(): - comment = "built on %s" % platform.platform(terse=1) - data = { - ':action':'file_upload', - 'protcol_version':'1', - 'name':self.distribution.get_name(), - 'version':self.distribution.get_version(), - 'content':(basename,content), - 'filetype':command, - 
'pyversion':pyversion, - 'md5_digest':md5(content).hexdigest(), - } - if command == 'bdist_rpm': - dist, version, id = platform.dist() - if dist: - comment = 'built for %s %s' % (dist, version) - elif command == 'bdist_dumb': - comment = 'built for %s' % platform.platform(terse=1) - data['comment'] = comment - - if self.sign: - data['gpg_signature'] = (os.path.basename(filename) + ".asc", - open(filename+".asc").read()) - - # set up the authentication - auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip() - - # Build up the MIME payload for the POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = '\n--' + boundary - end_boundary = sep_boundary + '--' - body = StringIO.StringIO() - for key, value in data.items(): - # handle multiple entries for the same name - if type(value) != type([]): - value = [value] - for value in value: - if type(value) is tuple: - fn = ';filename="%s"' % value[0] - value = value[1] - else: - fn = "" - value = str(value) - body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"'%key) - body.write(fn) - body.write("\n\n") - body.write(value) - if value and value[-1] == '\r': - body.write('\n') # write an extra newline (lurve Macs) - body.write(end_boundary) - body.write("\n") - body = body.getvalue() - - self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) - - # build the Request - # We can't use urllib2 since we need to send the Basic - # auth right with the first request - schema, netloc, url, params, query, fragments = \ - urlparse.urlparse(self.repository) - assert not params and not query and not fragments - if schema == 'http': - http = httplib.HTTPConnection(netloc) - elif schema == 'https': - http = httplib.HTTPSConnection(netloc) - else: - raise AssertionError, "unsupported schema "+schema - - data = '' - loglevel = log.INFO - try: - http.connect() - http.putrequest("POST", url) - http.putheader('Content-type', - 'multipart/form-data; boundary=%s'%boundary) - http.putheader('Content-length', str(len(body))) - http.putheader('Authorization', auth) - http.endheaders() - http.send(body) - except socket.error, e: - self.announce(str(e), log.ERROR) - return - - r = http.getresponse() - if r.status == 200: - self.announce('Server response (%s): %s' % (r.status, r.reason), - log.INFO) - else: - self.announce('Upload failed (%s): %s' % (r.status, r.reason), - log.ERROR) - if self.show_response: - print '-'*75, r.read(), '-'*75 diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/depends.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/depends.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/depends.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/depends.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -from __future__ import generators -import sys, imp, marshal -from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN -from distutils.version import StrictVersion, LooseVersion - -__all__ = [ - 'Require', 'find_module', 'get_module_constant', 'extract_constant' -] - -class Require: - """A prerequisite to building or installing a distribution""" - - def __init__(self,name,requested_version,module,homepage='', - attribute=None,format=None - ): - - if format is None and requested_version is not None: - format = StrictVersion - - if format is not None: - requested_version = format(requested_version) - if attribute is None: - attribute = '__version__' - - 
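[Annotation: the hand-rolled POST body in upload_file() earlier in this hunk is plain multipart/form-data framing: each field is delimited by the boundary, and file fields carry a filename parameter. A standalone sketch of that assembly step, with illustrative names, using the CRLF line endings MIME specifies (the deleted code used bare LF, which PyPI tolerated):

    def encode_multipart(fields, boundary):
        # fields: dict mapping names to strings or (filename, bytes) pairs,
        # mirroring the `data` dict built in upload_file() above
        parts = []
        for key, value in fields.items():
            if isinstance(value, tuple):
                fn = '; filename="%s"' % value[0]
                value = value[1]
            else:
                fn = ''
                value = str(value)
            parts.append('--%s\r\nContent-Disposition: form-data; name="%s"%s'
                         '\r\n\r\n%s' % (boundary, key, fn, value))
        return '\r\n'.join(parts) + '\r\n--%s--\r\n' % boundary
]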
self.__dict__.update(locals()) - del self.self - - - def full_name(self): - """Return full package/distribution name, w/version""" - if self.requested_version is not None: - return '%s-%s' % (self.name,self.requested_version) - return self.name - - - def version_ok(self,version): - """Is 'version' sufficiently up-to-date?""" - return self.attribute is None or self.format is None or \ - str(version)!="unknown" and version >= self.requested_version - - - def get_version(self, paths=None, default="unknown"): - - """Get version number of installed module, 'None', or 'default' - - Search 'paths' for module. If not found, return 'None'. If found, - return the extracted version attribute, or 'default' if no version - attribute was specified, or the value cannot be determined without - importing the module. The version is formatted according to the - requirement's version format (if any), unless it is 'None' or the - supplied 'default'. - """ - - if self.attribute is None: - try: - f,p,i = find_module(self.module,paths) - if f: f.close() - return default - except ImportError: - return None - - v = get_module_constant(self.module,self.attribute,default,paths) - - if v is not None and v is not default and self.format is not None: - return self.format(v) - - return v - - - def is_present(self,paths=None): - """Return true if dependency is present on 'paths'""" - return self.get_version(paths) is not None - - - def is_current(self,paths=None): - """Return true if dependency is present and up-to-date on 'paths'""" - version = self.get_version(paths) - if version is None: - return False - return self.version_ok(version) - - -def _iter_code(code): - - """Yield '(op,arg)' pair for each operation in code object 'code'""" - - from array import array - from dis import HAVE_ARGUMENT, EXTENDED_ARG - - bytes = array('b',code.co_code) - eof = len(code.co_code) - - ptr = 0 - extended_arg = 0 - - while ptr=HAVE_ARGUMENT: - - arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg - ptr += 3 - - if op==EXTENDED_ARG: - extended_arg = arg * 65536L - continue - - else: - arg = None - ptr += 1 - - yield op,arg - - - - - - - - - - -def find_module(module, paths=None): - """Just like 'imp.find_module()', but with package support""" - - parts = module.split('.') - - while parts: - part = parts.pop(0) - f, path, (suffix,mode,kind) = info = imp.find_module(part, paths) - - if kind==PKG_DIRECTORY: - parts = parts or ['__init__'] - paths = [path] - - elif parts: - raise ImportError("Can't find %r in %s" % (parts,module)) - - return info - - - - - - - - - - - - - - - - - - - - - - - - -def get_module_constant(module, symbol, default=-1, paths=None): - - """Find 'module' by searching 'paths', and extract 'symbol' - - Return 'None' if 'module' does not exist on 'paths', or it does not define - 'symbol'. If the module defines 'symbol' as a constant, return the - constant. Otherwise, return 'default'.""" - - try: - f, path, (suffix,mode,kind) = find_module(module,paths) - except ImportError: - # Module doesn't exist - return None - - try: - if kind==PY_COMPILED: - f.read(8) # skip magic & date - code = marshal.load(f) - elif kind==PY_FROZEN: - code = imp.get_frozen_object(module) - elif kind==PY_SOURCE: - code = compile(f.read(), path, 'exec') - else: - # Not something we can parse; we'll have to import it. 
:( - if module not in sys.modules: - imp.load_module(module,f,path,(suffix,mode,kind)) - return getattr(sys.modules[module],symbol,None) - - finally: - if f: - f.close() - - return extract_constant(code,symbol,default) - - - - - - - - -def extract_constant(code,symbol,default=-1): - """Extract the constant value of 'symbol' from 'code' - - If the name 'symbol' is bound to a constant value by the Python code - object 'code', return that value. If 'symbol' is bound to an expression, - return 'default'. Otherwise, return 'None'. - - Return value is based on the first assignment to 'symbol'. 'symbol' must - be a global, or at least a non-"fast" local in the code block. That is, - only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' - must be present in 'code.co_names'. - """ - - if symbol not in code.co_names: - # name's not there, can't possibly be an assigment - return None - - name_idx = list(code.co_names).index(symbol) - - STORE_NAME = 90 - STORE_GLOBAL = 97 - LOAD_CONST = 100 - - const = default - - for op, arg in _iter_code(code): - - if op==LOAD_CONST: - const = code.co_consts[arg] - elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL): - return const - else: - const = default - -if sys.platform.startswith('java') or sys.platform == 'cli': - # XXX it'd be better to test assertions about bytecode instead... - del extract_constant, get_module_constant - __all__.remove('extract_constant') - __all__.remove('get_module_constant') - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/dist.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/dist.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/dist.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/dist.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,861 +0,0 @@ -__all__ = ['Distribution'] - -from distutils.core import Distribution as _Distribution -from setuptools.depends import Require -from setuptools.command.install import install -from setuptools.command.sdist import sdist -from setuptools.command.install_lib import install_lib -from distutils.errors import DistutilsOptionError, DistutilsPlatformError -from distutils.errors import DistutilsSetupError -import setuptools, pkg_resources, distutils.core, distutils.dist, distutils.cmd -import os, distutils.log, re - -def _get_unpatched(cls): - """Protect against re-patching the distutils if reloaded - - Also ensures that no other distutils extension monkeypatched the distutils - first. 
- """ - while cls.__module__.startswith('setuptools'): - cls, = cls.__bases__ - if not cls.__module__.startswith('distutils'): - raise AssertionError( - "distutils has already been patched by %r" % cls - ) - return cls - -_Distribution = _get_unpatched(_Distribution) - -sequence = tuple, list - -def check_importable(dist, attr, value): - try: - ep = pkg_resources.EntryPoint.parse('x='+value) - assert not ep.extras - except (TypeError,ValueError,AttributeError,AssertionError): - raise DistutilsSetupError( - "%r must be importable 'module:attrs' string (got %r)" - % (attr,value) - ) - - -def assert_string_list(dist, attr, value): - """Verify that value is a string list or None""" - try: - assert ''.join(value)!=value - except (TypeError,ValueError,AttributeError,AssertionError): - raise DistutilsSetupError( - "%r must be a list of strings (got %r)" % (attr,value) - ) - -def check_nsp(dist, attr, value): - """Verify that namespace packages are valid""" - assert_string_list(dist,attr,value) - for nsp in value: - if not dist.has_contents_for(nsp): - raise DistutilsSetupError( - "Distribution contains no modules or packages for " + - "namespace package %r" % nsp - ) - if '.' in nsp: - parent = '.'.join(nsp.split('.')[:-1]) - if parent not in value: - distutils.log.warn( - "WARNING: %r is declared as a package namespace, but %r" - " is not: please correct this in setup.py", nsp, parent - ) - -def check_extras(dist, attr, value): - """Verify that extras_require mapping is valid""" - try: - for k,v in value.items(): - list(pkg_resources.parse_requirements(v)) - except (TypeError,ValueError,AttributeError): - raise DistutilsSetupError( - "'extras_require' must be a dictionary whose values are " - "strings or lists of strings containing valid project/version " - "requirement specifiers." 
- ) - - - - -def assert_bool(dist, attr, value): - """Verify that value is True, False, 0, or 1""" - if bool(value) != value: - raise DistutilsSetupError( - "%r must be a boolean value (got %r)" % (attr,value) - ) -def check_requirements(dist, attr, value): - """Verify that install_requires is a valid requirements list""" - try: - list(pkg_resources.parse_requirements(value)) - except (TypeError,ValueError): - raise DistutilsSetupError( - "%r must be a string or list of strings " - "containing valid project/version requirement specifiers" % (attr,) - ) -def check_entry_points(dist, attr, value): - """Verify that entry_points map is parseable""" - try: - pkg_resources.EntryPoint.parse_map(value) - except ValueError, e: - raise DistutilsSetupError(e) - -def check_test_suite(dist, attr, value): - if not isinstance(value,basestring): - raise DistutilsSetupError("test_suite must be a string") - -def check_package_data(dist, attr, value): - """Verify that value is a dictionary of package names to glob lists""" - if isinstance(value,dict): - for k,v in value.items(): - if not isinstance(k,str): break - try: iter(v) - except TypeError: - break - else: - return - raise DistutilsSetupError( - attr+" must be a dictionary mapping package names to lists of " - "wildcard patterns" - ) - -def check_packages(dist, attr, value): - for pkgname in value: - if not re.match(r'\w+(\.\w+)*', pkgname): - distutils.log.warn( - "WARNING: %r not a valid package name; please use only" - ".-separated package names in setup.py", pkgname - ) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -class Distribution(_Distribution): - """Distribution with support for features, tests, and package data - - This is an enhanced version of 'distutils.dist.Distribution' that - effectively adds the following new optional keyword arguments to 'setup()': - - 'install_requires' -- a string or sequence of strings specifying project - versions that the distribution requires when installed, in the format - used by 'pkg_resources.require()'. They will be installed - automatically when the package is installed. If you wish to use - packages that are not available in PyPI, or want to give your users an - alternate download location, you can add a 'find_links' option to the - '[easy_install]' section of your project's 'setup.cfg' file, and then - setuptools will scan the listed web pages for links that satisfy the - requirements. - - 'extras_require' -- a dictionary mapping names of optional "extras" to the - additional requirement(s) that using those extras incurs. For example, - this:: - - extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) - - indicates that the distribution can optionally provide an extra - capability called "reST", but it can only be used if docutils and - reSTedit are installed. If the user installs your package using - EasyInstall and requests one of your extras, the corresponding - additional requirements will be installed if needed. - - 'features' -- a dictionary mapping option names to 'setuptools.Feature' - objects. Features are a portion of the distribution that can be - included or excluded based on user options, inter-feature dependencies, - and availability on the current system. Excluded features are omitted - from all setup commands, including source and binary distributions, so - you can create multiple distributions from the same source tree. - Feature names should be valid Python identifiers, except that they may - contain the '-' (minus) sign. 
Features can be included or excluded - via the command line options '--with-X' and '--without-X', where 'X' is - the name of the feature. Whether a feature is included by default, and - whether you are allowed to control this from the command line, is - determined by the Feature object. See the 'Feature' class for more - information. - - 'test_suite' -- the name of a test suite to run for the 'test' command. - If the user runs 'python setup.py test', the package will be installed, - and the named test suite will be run. The format is the same as - would be used on a 'unittest.py' command line. That is, it is the - dotted name of an object to import and call to generate a test suite. - - 'package_data' -- a dictionary mapping package names to lists of filenames - or globs to use to find data files contained in the named packages. - If the dictionary has filenames or globs listed under '""' (the empty - string), those names will be searched for in every package, in addition - to any names for the specific package. Data files found using these - names/globs will be installed along with the package, in the same - location as the package. Note that globs are allowed to reference - the contents of non-package subdirectories, as long as you use '/' as - a path separator. (Globs are automatically converted to - platform-specific paths at runtime.) - - In addition to these new keywords, this class also has several new methods - for manipulating the distribution's contents. For example, the 'include()' - and 'exclude()' methods can be thought of as in-place add and subtract - commands that add or remove packages, modules, extensions, and so on from - the distribution. They are used by the feature subsystem to configure the - distribution for the included and excluded features. - """ - - _patched_dist = None - - def patch_missing_pkg_info(self, attrs): - # Fake up a replacement for the data that would normally come from - # PKG-INFO, but which might not yet be built if this is a fresh - # checkout. 
- # - if not attrs or 'name' not in attrs or 'version' not in attrs: - return - key = pkg_resources.safe_name(str(attrs['name'])).lower() - dist = pkg_resources.working_set.by_key.get(key) - if dist is not None and not dist.has_metadata('PKG-INFO'): - dist._version = pkg_resources.safe_version(str(attrs['version'])) - self._patched_dist = dist - - def __init__ (self, attrs=None): - have_package_data = hasattr(self, "package_data") - if not have_package_data: - self.package_data = {} - self.require_features = [] - self.features = {} - self.dist_files = [] - self.patch_missing_pkg_info(attrs) - # Make sure we have any eggs needed to interpret 'attrs' - if attrs is not None: - self.dependency_links = attrs.pop('dependency_links', []) - assert_string_list(self,'dependency_links',self.dependency_links) - if attrs and 'setup_requires' in attrs: - self.fetch_build_eggs(attrs.pop('setup_requires')) - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - if not hasattr(self,ep.name): - setattr(self,ep.name,None) - _Distribution.__init__(self,attrs) - if isinstance(self.metadata.version, (int,long,float)): - # Some people apparently take "version number" too literally :) - self.metadata.version = str(self.metadata.version) - - def parse_command_line(self): - """Process features after parsing command line options""" - result = _Distribution.parse_command_line(self) - if self.features: - self._finalize_features() - return result - - def _feature_attrname(self,name): - """Convert feature name to corresponding option attribute name""" - return 'with_'+name.replace('-','_') - - def fetch_build_eggs(self, requires): - """Resolve pre-setup requirements""" - from pkg_resources import working_set, parse_requirements - for dist in working_set.resolve( - parse_requirements(requires), installer=self.fetch_build_egg - ): - working_set.add(dist) - - def finalize_options(self): - _Distribution.finalize_options(self) - if self.features: - self._set_global_opts_from_features() - - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - value = getattr(self,ep.name,None) - if value is not None: - ep.require(installer=self.fetch_build_egg) - ep.load()(self, ep.name, value) - - def fetch_build_egg(self, req): - """Fetch an egg needed for building""" - try: - cmd = self._egg_fetcher - except AttributeError: - from setuptools.command.easy_install import easy_install - dist = self.__class__({'script_args':['easy_install']}) - dist.parse_config_files() - opts = dist.get_option_dict('easy_install') - keep = ( - 'find_links', 'site_dirs', 'index_url', 'optimize', - 'site_dirs', 'allow_hosts' - ) - for key in opts.keys(): - if key not in keep: - del opts[key] # don't use any other settings - if self.dependency_links: - links = self.dependency_links[:] - if 'find_links' in opts: - links = opts['find_links'][1].split() + links - opts['find_links'] = ('setup', links) - cmd = easy_install( - dist, args=["x"], install_dir=os.curdir, exclude_scripts=True, - always_copy=False, build_directory=None, editable=False, - upgrade=False, multi_version=True, no_report = True - ) - cmd.ensure_finalized() - self._egg_fetcher = cmd - return cmd.easy_install(req) - - def _set_global_opts_from_features(self): - """Add --with-X/--without-X options based on optional features""" - - go = [] - no = self.negative_opt.copy() - - for name,feature in self.features.items(): - self._set_feature(name,None) - feature.validate(self) - - if feature.optional: - descr = feature.description - incdef = ' (default)' - excdef='' 
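-                # e.g. a standard, available feature "foo" would get the help
-                # text "include foo (default)" / "exclude foo"; the swap below
-                # moves the "(default)" marker to the exclude option when the
-                # feature is off by default.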
- if not feature.include_by_default(): - excdef, incdef = incdef, excdef - - go.append(('with-'+name, None, 'include '+descr+incdef)) - go.append(('without-'+name, None, 'exclude '+descr+excdef)) - no['without-'+name] = 'with-'+name - - self.global_options = self.feature_options = go + self.global_options - self.negative_opt = self.feature_negopt = no - - - - - - - - - - - - - - - - - - - def _finalize_features(self): - """Add/remove features and resolve dependencies between them""" - - # First, flag all the enabled items (and thus their dependencies) - for name,feature in self.features.items(): - enabled = self.feature_is_included(name) - if enabled or (enabled is None and feature.include_by_default()): - feature.include_in(self) - self._set_feature(name,1) - - # Then disable the rest, so that off-by-default features don't - # get flagged as errors when they're required by an enabled feature - for name,feature in self.features.items(): - if not self.feature_is_included(name): - feature.exclude_from(self) - self._set_feature(name,0) - - - def get_command_class(self, command): - """Pluggable version of get_command_class()""" - if command in self.cmdclass: - return self.cmdclass[command] - - for ep in pkg_resources.iter_entry_points('distutils.commands',command): - ep.require(installer=self.fetch_build_egg) - self.cmdclass[command] = cmdclass = ep.load() - return cmdclass - else: - return _Distribution.get_command_class(self, command) - - def print_commands(self): - for ep in pkg_resources.iter_entry_points('distutils.commands'): - if ep.name not in self.cmdclass: - cmdclass = ep.load(False) # don't require extras, we're not running - self.cmdclass[ep.name] = cmdclass - return _Distribution.print_commands(self) - - - - - - def _set_feature(self,name,status): - """Set feature's inclusion status""" - setattr(self,self._feature_attrname(name),status) - - def feature_is_included(self,name): - """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" - return getattr(self,self._feature_attrname(name)) - - def include_feature(self,name): - """Request inclusion of feature named 'name'""" - - if self.feature_is_included(name)==0: - descr = self.features[name].description - raise DistutilsOptionError( - descr + " is required, but was excluded or is not available" - ) - self.features[name].include_in(self) - self._set_feature(name,1) - - def include(self,**attrs): - """Add items to distribution that are named in keyword arguments - - For example, 'dist.exclude(py_modules=["x"])' would add 'x' to - the distribution's 'py_modules' attribute, if it was not already - there. - - Currently, this method only supports inclusion for attributes that are - lists or tuples. If you need to add support for adding to other - attributes in this or a subclass, you can add an '_include_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' - will try to call 'dist._include_foo({"bar":"baz"})', which can then - handle whatever special inclusion logic is needed. - """ - for k,v in attrs.items(): - include = getattr(self, '_include_'+k, None) - if include: - include(v) - else: - self._include_misc(k,v) - - def exclude_package(self,package): - """Remove packages, modules, and extensions in named package""" - - pfx = package+'.' 
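-        # e.g. exclude_package('a.b') removes 'a.b' itself and true
-        # subpackages such as 'a.b.c', but not 'a.bc': the trailing dot in
-        # pfx makes the startswith() checks match only dotted children.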
- if self.packages: - self.packages = [ - p for p in self.packages - if p!=package and not p.startswith(pfx) - ] - - if self.py_modules: - self.py_modules = [ - p for p in self.py_modules - if p!=package and not p.startswith(pfx) - ] - - if self.ext_modules: - self.ext_modules = [ - p for p in self.ext_modules - if p.name!=package and not p.name.startswith(pfx) - ] - - - def has_contents_for(self,package): - """Return true if 'exclude_package(package)' would do something""" - - pfx = package+'.' - - for p in self.iter_distribution_names(): - if p==package or p.startswith(pfx): - return True - - - - - - - - - - - def _exclude_misc(self,name,value): - """Handle 'exclude()' for list/tuple attrs without a special handler""" - if not isinstance(value,sequence): - raise DistutilsSetupError( - "%s: setting must be a list or tuple (%r)" % (name, value) - ) - try: - old = getattr(self,name) - except AttributeError: - raise DistutilsSetupError( - "%s: No such distribution setting" % name - ) - if old is not None and not isinstance(old,sequence): - raise DistutilsSetupError( - name+": this setting cannot be changed via include/exclude" - ) - elif old: - setattr(self,name,[item for item in old if item not in value]) - - def _include_misc(self,name,value): - """Handle 'include()' for list/tuple attrs without a special handler""" - - if not isinstance(value,sequence): - raise DistutilsSetupError( - "%s: setting must be a list (%r)" % (name, value) - ) - try: - old = getattr(self,name) - except AttributeError: - raise DistutilsSetupError( - "%s: No such distribution setting" % name - ) - if old is None: - setattr(self,name,value) - elif not isinstance(old,sequence): - raise DistutilsSetupError( - name+": this setting cannot be changed via include/exclude" - ) - else: - setattr(self,name,old+[item for item in value if item not in old]) - - def exclude(self,**attrs): - """Remove items from distribution that are named in keyword arguments - - For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from - the distribution's 'py_modules' attribute. Excluding packages uses - the 'exclude_package()' method, so all of the package's contained - packages, modules, and extensions are also excluded. - - Currently, this method only supports exclusion from attributes that are - lists or tuples. If you need to add support for excluding from other - attributes in this or a subclass, you can add an '_exclude_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' - will try to call 'dist._exclude_foo({"bar":"baz"})', which can then - handle whatever special exclusion logic is needed. - """ - for k,v in attrs.items(): - exclude = getattr(self, '_exclude_'+k, None) - if exclude: - exclude(v) - else: - self._exclude_misc(k,v) - - def _exclude_packages(self,packages): - if not isinstance(packages,sequence): - raise DistutilsSetupError( - "packages: setting must be a list or tuple (%r)" % (packages,) - ) - map(self.exclude_package, packages) - - - - - - - - - - - - - def _parse_command_opts(self, parser, args): - # Remove --with-X/--without-X options when processing command args - self.global_options = self.__class__.global_options - self.negative_opt = self.__class__.negative_opt - - # First, expand any aliases - command = args[0] - aliases = self.get_option_dict('aliases') - while command in aliases: - src,alias = aliases[command] - del aliases[command] # ensure each alias can expand only once! 
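-            # e.g. given a (hypothetical) alias 'rj' -> 'release --dry-run',
-            # ['rj', '-v'] expands to ['release', '--dry-run', '-v'];
-            # deleting the entry first keeps a self-referential alias like
-            # 'rj' -> 'rj -v' from expanding forever.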
- import shlex - args[:1] = shlex.split(alias,True) - command = args[0] - - nargs = _Distribution._parse_command_opts(self, parser, args) - - # Handle commands that want to consume all remaining arguments - cmd_class = self.get_command_class(command) - if getattr(cmd_class,'command_consumes_arguments',None): - self.get_option_dict(command)['args'] = ("command line", nargs) - if nargs is not None: - return [] - - return nargs - - - - - - - - - - - - - - - - - def get_cmdline_options(self): - """Return a '{cmd: {opt:val}}' map of all command-line options - - Option names are all long, but do not include the leading '--', and - contain dashes rather than underscores. If the option doesn't take - an argument (e.g. '--quiet'), the 'val' is 'None'. - - Note that options provided by config files are intentionally excluded. - """ - - d = {} - - for cmd,opts in self.command_options.items(): - - for opt,(src,val) in opts.items(): - - if src != "command line": - continue - - opt = opt.replace('_','-') - - if val==0: - cmdobj = self.get_command_obj(cmd) - neg_opt = self.negative_opt.copy() - neg_opt.update(getattr(cmdobj,'negative_opt',{})) - for neg,pos in neg_opt.items(): - if pos==opt: - opt=neg - val=None - break - else: - raise AssertionError("Shouldn't be able to get here") - - elif val==1: - val = None - - d.setdefault(cmd,{})[opt] = val - - return d - - - def iter_distribution_names(self): - """Yield all packages, modules, and extension names in distribution""" - - for pkg in self.packages or (): - yield pkg - - for module in self.py_modules or (): - yield module - - for ext in self.ext_modules or (): - if isinstance(ext,tuple): - name, buildinfo = ext - else: - name = ext.name - if name.endswith('module'): - name = name[:-6] - yield name - -# Install it throughout the distutils -for module in distutils.dist, distutils.core, distutils.cmd: - module.Distribution = Distribution - - - - - - - - - - - - - - - - - - - - -class Feature: - """A subset of the distribution that can be excluded if unneeded/wanted - - Features are created using these keyword arguments: - - 'description' -- a short, human readable description of the feature, to - be used in error messages, and option help messages. - - 'standard' -- if true, the feature is included by default if it is - available on the current system. Otherwise, the feature is only - included if requested via a command line '--with-X' option, or if - another included feature requires it. The default setting is 'False'. - - 'available' -- if true, the feature is available for installation on the - current system. The default setting is 'True'. - - 'optional' -- if true, the feature's inclusion can be controlled from the - command line, using the '--with-X' or '--without-X' options. If - false, the feature's inclusion status is determined automatically, - based on 'availabile', 'standard', and whether any other feature - requires it. The default setting is 'True'. - - 'require_features' -- a string or sequence of strings naming features - that should also be included if this feature is included. Defaults to - empty list. May also contain 'Require' objects that should be - added/removed from the distribution. - - 'remove' -- a string or list of strings naming packages to be removed - from the distribution if this feature is *not* included. If the - feature *is* included, this argument is ignored. 
This argument exists - to support removing features that "crosscut" a distribution, such as - defining a 'tests' feature that removes all the 'tests' subpackages - provided by other features. The default for this argument is an empty - list. (Note: the named package(s) or modules must exist in the base - distribution when the 'setup()' function is initially called.) - - other keywords -- any other keyword arguments are saved, and passed to - the distribution's 'include()' and 'exclude()' methods when the - feature is included or excluded, respectively. So, for example, you - could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be - added or removed from the distribution as appropriate. - - A feature must include at least one 'requires', 'remove', or other - keyword argument. Otherwise, it can't affect the distribution in any way. - Note also that you can subclass 'Feature' to create your own specialized - feature types that modify the distribution in other ways when included or - excluded. See the docstrings for the various methods here for more detail. - Aside from the methods, the only feature attributes that distributions look - at are 'description' and 'optional'. - """ - def __init__(self, description, standard=False, available=True, - optional=True, require_features=(), remove=(), **extras - ): - - self.description = description - self.standard = standard - self.available = available - self.optional = optional - if isinstance(require_features,(str,Require)): - require_features = require_features, - - self.require_features = [ - r for r in require_features if isinstance(r,str) - ] - er = [r for r in require_features if not isinstance(r,str)] - if er: extras['require_features'] = er - - if isinstance(remove,str): - remove = remove, - self.remove = remove - self.extras = extras - - if not remove and not require_features and not extras: - raise DistutilsSetupError( - "Feature %s: must define 'require_features', 'remove', or at least one" - " of 'packages', 'py_modules', etc." - ) - - def include_by_default(self): - """Should this feature be included by default?""" - return self.available and self.standard - - def include_in(self,dist): - - """Ensure feature and its requirements are included in distribution - - You may override this in a subclass to perform additional operations on - the distribution. Note that this method may be called more than once - per feature, and so should be idempotent. - - """ - - if not self.available: - raise DistutilsPlatformError( - self.description+" is required," - "but is not available on this platform" - ) - - dist.include(**self.extras) - - for f in self.require_features: - dist.include_feature(f) - - - - def exclude_from(self,dist): - - """Ensure feature is excluded from distribution - - You may override this in a subclass to perform additional operations on - the distribution. This method will be called at most once per - feature, and only after all included features have been asked to - include themselves. - """ - - dist.exclude(**self.extras) - - if self.remove: - for item in self.remove: - dist.exclude_package(item) - - - - def validate(self,dist): - - """Verify that feature makes sense in context of distribution - - This method is called by the distribution just before it parses its - command line. It checks to ensure that the 'remove' attribute, if any, - contains only valid package/module names that are present in the base - distribution when 'setup()' is called. 
You may override it in a - subclass to perform any other required validation of the feature - against a target distribution. - """ - - for item in self.remove: - if not dist.has_contents_for(item): - raise DistutilsSetupError( - "%s wants to be able to remove %s, but the distribution" - " doesn't contain any packages or modules under %s" - % (self.description, item, item) - ) - - - - - - - - - - - - - - - - - - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/extension.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/extension.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/extension.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/extension.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -from distutils.core import Extension as _Extension -from dist import _get_unpatched -_Extension = _get_unpatched(_Extension) - -try: - from Pyrex.Distutils.build_ext import build_ext -except ImportError: - have_pyrex = False -else: - have_pyrex = True - - -class Extension(_Extension): - """Extension that uses '.c' files in place of '.pyx' files""" - - if not have_pyrex: - # convert .pyx extensions to .c - def __init__(self,*args,**kw): - _Extension.__init__(self,*args,**kw) - sources = [] - for s in self.sources: - if s.endswith('.pyx'): - sources.append(s[:-3]+'c') - else: - sources.append(s) - self.sources = sources - -class Library(Extension): - """Just like a regular Extension, but built as a library instead""" - -import sys, distutils.core, distutils.extension -distutils.core.Extension = Extension -distutils.extension.Extension = Extension -if 'distutils.command.build_ext' in sys.modules: - sys.modules['distutils.command.build_ext'].Extension = Extension diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/package_index.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/package_index.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/package_index.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/package_index.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,798 +0,0 @@ -"""PyPI and direct package downloading""" -import sys, os.path, re, urlparse, urllib2, shutil, random, socket, cStringIO -import httplib, urllib -from pkg_resources import * -from distutils import log -from distutils.errors import DistutilsError -try: - from hashlib import md5 -except ImportError: - from md5 import md5 -from fnmatch import translate -EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$') -HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) -# this is here to fix emacs' cruddy broken syntax highlighting -PYPI_MD5 = re.compile( - '([^<]+)\n\s+\\(md5\\)' -) -URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match -EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() - -def is_local(url_or_fname): - """ Return True if url_or_fname is a "file:" url or if it is a schemaless thing (which is presumably a filename). """ - mo = URL_SCHEME(url_or_fname) - return not (mo and mo.group(1).lower()!='file') - -def url_or_fname_to_fname(url_or_fname): - """ Assert that is_local(url_or_fname) then if it is a "file:" url, parse it and run url2pathname on it, else just return it. 
""" - assert is_local(url_or_fname) - - mo = URL_SCHEME(url_or_fname) - if mo: - return urllib2.url2pathname(urlparse.urlparse(url)[2]) - else: - return url_or_fname - -__all__ = [ - 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', - 'interpret_distro_name', -] - -def parse_bdist_wininst(name): - """Return (base,pyversion) or (None,None) for possible .exe name""" - - lower = name.lower() - base, py_ver = None, None - - if lower.endswith('.exe'): - if lower.endswith('.win32.exe'): - base = name[:-10] - elif lower.startswith('.win32-py',-16): - py_ver = name[-7:-4] - base = name[:-16] - - return base,py_ver - -def egg_info_for_url(url): - scheme, server, path, parameters, query, fragment = urlparse.urlparse(url) - base = urllib2.unquote(path.split('/')[-1]) - if server=='sourceforge.net' and base=='download': # XXX Yuck - base = urllib2.unquote(path.split('/')[-2]) - if '#' in base: base, fragment = base.split('#',1) - return base,fragment - -def distros_for_url(url, metadata=None): - """Yield egg or source distribution objects that might be found at a URL""" - base, fragment = egg_info_for_url(url) - for dist in distros_for_location(url, base, metadata): yield dist - if fragment: - match = EGG_FRAGMENT.match(fragment) - if match: - for dist in interpret_distro_name( - url, match.group(1), metadata, precedence = CHECKOUT_DIST - ): - yield dist - -def distros_for_location(location, basename, metadata=None): - """Yield egg or source distribution objects based on basename""" - if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip - if basename.endswith('.egg') and '-' in basename: - # only one, unambiguous interpretation - return [Distribution.from_location(location, basename, metadata)] - if basename.endswith('.exe'): - win_base, py_ver = parse_bdist_wininst(basename) - if win_base is not None: - return interpret_distro_name( - location, win_base, metadata, py_ver, BINARY_DIST, "win32" - ) - # Try source distro extensions (.zip, .tgz, etc.) - # - for ext in EXTENSIONS: - if basename.endswith(ext): - basename = basename[:-len(ext)] - return interpret_distro_name(location, basename, metadata) - return [] # no extension matched - -def distros_for_filename(filename, metadata=None): - """Yield possible egg or source distribution objects based on a filename""" - return distros_for_location( - normalize_path(filename), os.path.basename(filename), metadata - ) - - -def interpret_distro_name(location, basename, metadata, - py_version=None, precedence=SOURCE_DIST, platform=None -): - """Generate alternative interpretations of a source distro name - - Note: if `location` is a filesystem filename, you should call - ``pkg_resources.normalize_path()`` on it before passing it to this - routine! - """ - # Generate alternative interpretations of a source distro name - # Because some packages are ambiguous as to name/versions split - # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. - # So, we generate each possible interepretation (e.g. "adns, python-1.1.0" - # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, - # the spurious interpretations should be ignored, because in the event - # there's also an "adns" package, the spurious "python-1.1.0" version will - # compare lower than any numeric version number, and is therefore unlikely - # to match a request for it. It's still a potential problem, though, and - # in the long run PyPI and the distutils should go for "safe" names and - # versions in distribution archive names (sdist and bdist). 
- - parts = basename.split('-') - if not py_version: - for i,p in enumerate(parts[2:]): - if len(p)==5 and p.startswith('py2.'): - return # It's a bdist_dumb, not an sdist -- bail out - - for p in range(1,len(parts)+1): - yield Distribution( - location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), - py_version=py_version, precedence = precedence, - platform = platform - ) - -REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) -# this line is here to fix emacs' cruddy broken syntax highlighting - -def find_external_links(url, page): - """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" - - for match in REL.finditer(page): - tag, rel = match.groups() - rels = map(str.strip, rel.lower().split(',')) - if 'homepage' in rels or 'download' in rels: - for match in HREF.finditer(tag): - yield urlparse.urljoin(url, htmldecode(match.group(1))) - - for tag in ("Home Page", "Download URL"): - pos = page.find(tag) - if pos!=-1: - match = HREF.search(page,pos) - if match: - yield urlparse.urljoin(url, htmldecode(match.group(1))) - -user_agent = "Python-urllib/%s setuptools/%s" % ( - urllib2.__version__, require('setuptools')[0].version -) - - -class PackageIndex(Environment): - """A distribution index that scans web pages for download URLs""" - - def __init__(self, index_url="http://pypi.python.org/simple", hosts=('*',), - *args, **kw - ): - Environment.__init__(self,*args,**kw) - self.index_url = index_url + "/"[:not index_url.endswith('/')] - self.scanned_urls = {} - self.fetched_urls = {} - self.package_pages = {} - self.allows = re.compile('|'.join(map(translate,hosts))).match - self.to_scan = [] - - - - def process_url(self, url, retrieve=False): - """Evaluate a URL as a possible download, and maybe retrieve it""" - if url in self.scanned_urls and not retrieve: - return - self.scanned_urls[url] = True - if not URL_SCHEME(url): - self.process_filename(url) - return - else: - dists = list(distros_for_url(url)) - if dists: - if not self.url_ok(url): - return - self.debug("Found link: %s", url) - - if dists or not retrieve or url in self.fetched_urls: - map(self.add, dists) - return # don't need the actual page - - if not self.url_ok(url): - self.fetched_urls[url] = True - return - - self.info("Reading %s", url) - self.fetched_urls[url] = True # prevent multiple fetch attempts - f = self.open_url(url, "Download error: %s -- Some packages may not be found!") - if f is None: return - self.fetched_urls[f.url] = True - if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it - return - - base = f.url # handle redirects - page = f.read() - f.close() - if url.startswith(self.index_url) and getattr(f,'code',None)!=404: - page = self.process_index(url, page) - for match in HREF.finditer(page): - link = urlparse.urljoin(base, htmldecode(match.group(1))) - self.process_url(link) - - def process_filename(self, fn, nested=False): - # process filenames or directories - if not os.path.exists(fn): - self.warn("Not found: %s", fn) - return - - if os.path.isdir(fn) and not nested: - path = os.path.realpath(fn) - for item in os.listdir(path): - self.process_filename(os.path.join(path,item), True) - - dists = distros_for_filename(fn) - if dists: - self.debug("Found: %s", fn) - map(self.add, dists) - - def url_ok(self, url, fatal=False): - s = URL_SCHEME(url) - if (s and s.group(1).lower()=='file') or self.allows(urlparse.urlparse(url)[1]): - return True - msg = "\nLink to % s ***BLOCKED*** by --allow-hosts\n" - if fatal: - 
raise DistutilsError(msg % url) - else: - self.warn(msg, url) - - def scan_egg_links(self, search_path): - for item in search_path: - if os.path.isdir(item): - for entry in os.listdir(item): - if entry.endswith('.egg-link'): - self.scan_egg_link(item, entry) - - def scan_egg_link(self, path, entry): - lines = filter(None, map(str.strip, file(os.path.join(path, entry)))) - if len(lines)==2: - for dist in find_distributions(os.path.join(path, lines[0])): - dist.location = os.path.join(path, *lines) - dist.precedence = SOURCE_DIST - self.add(dist) - - def process_index(self,url,page): - """Process the contents of a PyPI page""" - def scan(link): - # Process a URL to see if it's for a package page - if link.startswith(self.index_url): - parts = map( - urllib2.unquote, link[len(self.index_url):].split('/') - ) - if len(parts)==2 and '#' not in parts[1]: - # it's a package page, sanitize and index it - pkg = safe_name(parts[0]) - ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(),{})[link] = True - return to_filename(pkg), to_filename(ver) - return None, None - - # process an index page into the package-page index - for match in HREF.finditer(page): - scan( urlparse.urljoin(url, htmldecode(match.group(1))) ) - - pkg, ver = scan(url) # ensure this page is in the page index - if pkg: - # process individual package page - for new_url in find_external_links(url, page): - # Process the found URL - base, frag = egg_info_for_url(new_url) - if base.endswith('.py') and not frag: - if ver: - new_url+='#egg=%s-%s' % (pkg,ver) - else: - self.need_version_info(url) - self.scan_url(new_url) - - return PYPI_MD5.sub( - lambda m: '%s' % m.group(1,3,2), page - ) - else: - return "" # no sense double-scanning non-package pages - - - - def need_version_info(self, url): - self.scan_all( - "Page at %s links to .py file(s) without version info; an index " - "scan is required.", url - ) - - def scan_all(self, msg=None, *args): - if self.index_url not in self.fetched_urls: - if msg: self.warn(msg,*args) - self.info( - "Scanning index of all packages (this may take a while)" - ) - self.scan_url(self.index_url) - - def find_packages(self, requirement): - self.scan_url(self.index_url + requirement.unsafe_name+'/') - - if not self.package_pages.get(requirement.key): - # Fall back to safe version of the name - self.scan_url(self.index_url + requirement.project_name+'/') - - if not self.package_pages.get(requirement.key): - # We couldn't find the target package, so search the index page too - self.not_found_in_index(requirement) - - for url in list(self.package_pages.get(requirement.key,())): - # scan each page that might be related to the desired package - self.scan_url(url) - - def obtain(self, requirement, installer=None): - self.prescan(); self.find_packages(requirement) - for dist in self[requirement.key]: - if dist in requirement: - return dist - self.debug("%s does not match %s", requirement, dist) - return super(PackageIndex, self).obtain(requirement,installer) - - - - - - def check_md5(self, cs, info, filename, tfp): - if re.match('md5=[0-9a-f]{32}$', info): - self.debug("Validating md5 checksum for %s", filename) - if cs.hexdigest()!=info[4:]: - tfp.close() - os.unlink(filename) - raise DistutilsError( - "MD5 validation failed for "+os.path.basename(filename)+ - "; possible download problem?" 
- ) - - def add_find_links(self, urls): - """Add `urls` to the list that will be prescanned for searches""" - for url in urls: - if ( - self.to_scan is None # if we have already "gone online" - or not URL_SCHEME(url) # or it's a local file/directory - or url.startswith('file:') - or list(distros_for_url(url)) # or a direct package link - ): - # then go ahead and process it now - self.scan_url(url) - else: - # otherwise, defer retrieval till later - self.to_scan.append(url) - - def prescan(self): - """Scan urls scheduled for prescanning (e.g. --find-links)""" - if self.to_scan: - map(self.scan_url, self.to_scan) - self.to_scan = None # from now on, go ahead and process immediately - - def not_found_in_index(self, requirement): - if self[requirement.key]: # we've seen at least one distro - meth, msg = self.info, "Couldn't retrieve index page for %r" - else: # no distros seen for this name, might be misspelled - meth, msg = (self.warn, - "Couldn't find index page for %r (maybe misspelled?)") - meth(msg, requirement.unsafe_name) - self.scan_all() - - def download(self, spec, tmpdir): - """Locate and/or download `spec` to `tmpdir`, returning a local path - - `spec` may be a ``Requirement`` object, or a string containing a URL, - an existing local filename, or a project/version requirement spec - (i.e. the string form of a ``Requirement`` object). If it is the URL - of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one - that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is - automatically created alongside the downloaded file. - - If `spec` is a ``Requirement`` object or a string containing a - project/version requirement spec, this method returns the location of - a matching distribution (possibly after downloading it to `tmpdir`). - If `spec` is a locally existing file or directory name, it is simply - returned unchanged. If `spec` is a URL, it is downloaded to a subpath - of `tmpdir`, and the local filename is returned. Various errors may be - raised if a problem occurs during downloading. - """ - if not isinstance(spec,Requirement): - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir - found = self._download_url(scheme.group(1), spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found,fragment,tmpdir) - return found - elif os.path.exists(spec): - # Existing file or directory, just return it - return spec - else: - try: - spec = Requirement.parse(spec) - except ValueError: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % - (spec,) - ) - return getattr(self.fetch_distribution(spec, tmpdir),'location',None) - - - def fetch_distribution(self, - requirement, tmpdir, force_scan=False, source=False, develop_ok=False, - local_index=None, - ): - """Obtain a distribution suitable for fulfilling `requirement` - - `requirement` must be a ``pkg_resources.Requirement`` instance. - If necessary, or if the `force_scan` flag is set, the requirement is - searched for in the (online) package index as well as the locally - installed packages. If a distribution matching `requirement` is found, - the returned distribution's ``location`` is the value you would have - gotten from calling the ``download()`` method with the matching - distribution's URL or filename. If no matching distribution is found, - ``None`` is returned. - - If the `source` flag is set, only source distributions and source - checkout links will be considered. 
Unless the `develop_ok` flag is - set, development and system eggs (i.e., those using the ``.egg-info`` - format) will be ignored. - """ - # process a Requirement - self.info("Searching for %s", requirement) - skipped = {} - dist = None - - def find(env, req): - # Find a matching distribution; may be called more than once - - # first try to find a local dist - for allow_remote in (False, True): - # then try to find a platform-dependent dist - for allow_platform_independent in (False, True): - for dist in env[req.key]: - if dist.precedence==DEVELOP_DIST and not develop_ok: - if dist not in skipped: - self.warn("Skipping development or system egg: %s",dist) - skipped[dist] = 1 - continue - - if ((is_local(dist.location) or allow_remote) and - (dist in req) and - ((allow_platform_independent or dist.platform is not None) and - (dist.precedence<=SOURCE_DIST or not source))): - return dist - - if force_scan: - self.prescan() - self.find_packages(requirement) - dist = find(self, requirement) - - if local_index is not None: - dist = dist or find(local_index, requirement) - - if dist is None and self.to_scan is not None: - self.prescan() - dist = find(self, requirement) - - if dist is None and not force_scan: - self.find_packages(requirement) - dist = find(self, requirement) - - if dist is None: - self.warn( - "No local packages or download links found for %s%s", - (source and "a source distribution of " or ""), - requirement, - ) - else: - self.info("Best match: %s", dist) - return dist.clone(location=self.download(dist.location, tmpdir)) - - - def fetch(self, requirement, tmpdir, force_scan=False, source=False): - """Obtain a file suitable for fulfilling `requirement` - - DEPRECATED; use the ``fetch_distribution()`` method now instead. For - backward compatibility, this routine is identical but returns the - ``location`` of the downloaded distribution instead of a distribution - object. - """ - dist = self.fetch_distribution(requirement,tmpdir,force_scan,source) - if dist is not None: - return dist.location - return None - - - def gen_setup(self, filename, fragment, tmpdir): - match = EGG_FRAGMENT.match(fragment) - dists = match and [d for d in - interpret_distro_name(filename, match.group(1), None) if d.version - ] or [] - - if len(dists)==1: # unambiguous ``#egg`` fragment - basename = os.path.basename(filename) - - # Make sure the file has been downloaded to the temp dir. - if os.path.dirname(filename) != tmpdir: - dst = os.path.join(tmpdir, basename) - from setuptools.command.easy_install import samefile - if not samefile(filename, dst): - shutil.copy2(filename, dst) - filename=dst - - file = open(os.path.join(tmpdir, 'setup.py'), 'w') - file.write( - "from setuptools import setup\n" - "setup(name=%r, version=%r, py_modules=[%r])\n" - % ( - dists[0].project_name, dists[0].version, - os.path.splitext(basename)[0] - ) - ) - file.close() - return filename - - elif match: - raise DistutilsError( - "Can't unambiguously interpret project/version identifier %r; " - "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment,dists) - ) - else: - raise DistutilsError( - "Can't process plain .py files without an '#egg=name-version'" - " suffix to enable automatic setup script generation." 
- ) - - dl_blocksize = 8192 - def _download_to(self, url, filename): - self.info("Downloading %s", url) - # Download the file - fp, tfp, info = None, None, None - try: - if '#' in url: - url, info = url.split('#', 1) - fp = self.open_url(url) - if isinstance(fp, urllib2.HTTPError): - raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code,fp.msg) - ) - cs = md5() - headers = fp.info() - blocknum = 0 - bs = self.dl_blocksize - size = -1 - if "content-length" in headers: - size = int(headers["Content-Length"]) - self.reporthook(url, filename, blocknum, bs, size) - tfp = open(filename,'wb') - while True: - block = fp.read(bs) - if block: - cs.update(block) - tfp.write(block) - blocknum += 1 - self.reporthook(url, filename, blocknum, bs, size) - else: - break - if info: self.check_md5(cs, info, filename, tfp) - return headers - finally: - if fp: fp.close() - if tfp: tfp.close() - - def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op - - - def open_url(self, url, warning=None): - if url.startswith('file:'): return local_open(url) - try: - return open_with_auth(url) - except urllib2.HTTPError, v: - return v - except urllib2.URLError, v: - reason = v.reason - except httplib.HTTPException, v: - reason = "%s: %s" % (v.__doc__ or v.__class__.__name__, v) - if warning: - self.warn(warning, reason) - else: - raise DistutilsError("Download error for %s: %s" % (url, reason)) - - def _download_url(self, scheme, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) - if name: - while '..' in name: - name = name.replace('..','.').replace('\\','_') - else: - name = "__downloaded__" # default if URL has no path contents - - if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download - - filename = os.path.join(tmpdir,name) - - # Download the file - # - if scheme=='svn' or scheme.startswith('svn+'): - return self._download_svn(url, filename) - elif scheme=='file': - return urllib2.url2pathname(urlparse.urlparse(url)[2]) - else: - self.url_ok(url, True) # raises error if not allowed - return self._attempt_download(url, filename) - - - def scan_url(self, url): - self.process_url(url, True) - - - def _attempt_download(self, url, filename): - headers = self._download_to(url, filename) - if 'html' in headers.get('content-type','').lower(): - return self._download_html(url, headers, filename) - else: - return filename - - def _download_html(self, url, headers, filename): - file = open(filename) - for line in file: - if line.strip(): - # Check for a subversion index page - if re.search(r'([^- ]+ - )?Revision \d+:', line): - # it's a subversion index page: - file.close() - os.unlink(filename) - return self._download_svn(url, filename) - break # not an index page - file.close() - os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at "+url) - - def _download_svn(self, url, filename): - url = url.split('#',1)[0] # remove any fragment for svn's sake - self.info("Doing subversion checkout from %s to %s", url, filename) - os.system("svn checkout -q %s %s" % (url, filename)) - return filename - - def debug(self, msg, *args): - log.debug(msg, *args) - - def info(self, msg, *args): - log.info(msg, *args) - - def warn(self, msg, *args): - log.warn(msg, *args) - -# This pattern matches a character entity reference (a decimal numeric -# references, a hexadecimal numeric reference, or a named reference). 
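-# For example, htmldecode('&#65;&#x42;&amp;') returns 'AB&': decimal and
-# hexadecimal numeric references go through uchr(), named references are
-# looked up in htmlentitydefs.name2codepoint, and anything unrecognized is
-# left in place unchanged.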
-entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub - -def uchr(c): - if not isinstance(c, int): - return c - if c>255: return unichr(c) - return chr(c) - -def decode_entity(match): - what = match.group(1) - if what.startswith('#x'): - what = int(what[2:], 16) - elif what.startswith('#'): - what = int(what[1:]) - else: - from htmlentitydefs import name2codepoint - what = name2codepoint.get(what, match.group(0)) - return uchr(what) - -def htmldecode(text): - """Decode HTML entities in the given text.""" - return entity_sub(decode_entity, text) - - - - - - - - - - - - - - - - - -def open_with_auth(url): - """Open a urllib2 request, handling HTTP authentication""" - - scheme, netloc, path, params, query, frag = urlparse.urlparse(url) - - if scheme in ('http', 'https'): - auth, host = urllib.splituser(netloc) - else: - auth = None - - if auth: - auth = "Basic " + urllib2.unquote(auth).encode('base64').strip() - new_url = urlparse.urlunparse((scheme,host,path,params,query,frag)) - request = urllib2.Request(new_url) - request.add_header("Authorization", auth) - else: - request = urllib2.Request(url) - - request.add_header('User-Agent', user_agent) - fp = urllib2.urlopen(request) - - if auth: - # Put authentication info back into request URL if same host, - # so that links found on the page will work - s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url) - if s2==scheme and h2==host: - fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2)) - - return fp - - - - - - - - - - - - -def fix_sf_url(url): - return url # backward compatibility - -def local_open(url): - """Read a local path, with special support for directories""" - scheme, server, path, param, query, frag = urlparse.urlparse(url) - filename = urllib2.url2pathname(path) - if os.path.isfile(filename): - return urllib2.urlopen(url) - elif path.endswith('/') and os.path.isdir(filename): - files = [] - for f in os.listdir(filename): - if f=='index.html': - body = open(os.path.join(filename,f),'rb').read() - break - elif os.path.isdir(os.path.join(filename,f)): - f+='/' - files.append("<a href=%r>%s</a>" % (f,f)) - else: - body = ("<html><head><title>%s" % url) + \ - "%s" % '\n'.join(files) - status, message = 200, "OK" - else: - status, message, body = 404, "Path not found", "Not found" - - return urllib2.HTTPError(url, status, message, - {'content-type':'text/html'}, cStringIO.StringIO(body)) - - - - - - - - - - - - - -# this line is a kludge to keep the trailing blank lines for pje's editor diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/sandbox.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/sandbox.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/sandbox.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/sandbox.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,287 +0,0 @@ -import os, sys, __builtin__, tempfile, operator, pkg_resources -_os = sys.modules[os.name] -_open = open -_file = file - -from distutils.errors import DistutilsError -from pkg_resources import working_set - -__all__ = [ - "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup", -] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def run_setup(setup_script, args): - """Run a distutils setup script, sandboxed in its directory""" - old_dir = os.getcwd() - save_argv = sys.argv[:] - save_path = sys.path[:] - setup_dir = os.path.abspath(os.path.dirname(setup_script)) - temp_dir = os.path.join(setup_dir,'temp') - if not 
os.path.isdir(temp_dir): os.makedirs(temp_dir) - save_tmp = tempfile.tempdir - save_modules = sys.modules.copy() - pr_state = pkg_resources.__getstate__() - try: - tempfile.tempdir = temp_dir; os.chdir(setup_dir) - try: - sys.argv[:] = [setup_script]+list(args) - sys.path.insert(0, setup_dir) - # reset to include setup dir, w/clean callback list - working_set.__init__() - working_set.callbacks.append(lambda dist:dist.activate()) - DirectorySandbox(setup_dir).run( - lambda: execfile( - "setup.py", - {'__file__':setup_script, '__name__':'__main__'} - ) - ) - except SystemExit, v: - if v.args and v.args[0]: - raise - # Normal exit, just return - finally: - pkg_resources.__setstate__(pr_state) - sys.modules.update(save_modules) - for key in list(sys.modules): - if key not in save_modules: del sys.modules[key] - os.chdir(old_dir) - sys.path[:] = save_path - sys.argv[:] = save_argv - tempfile.tempdir = save_tmp - - - -class AbstractSandbox: - """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" - - _active = False - - def __init__(self): - self._attrs = [ - name for name in dir(_os) - if not name.startswith('_') and hasattr(self,name) - ] - - def _copy(self, source): - for name in self._attrs: - setattr(os, name, getattr(source,name)) - - def run(self, func): - """Run 'func' under os sandboxing""" - try: - self._copy(self) - __builtin__.file = self._file - __builtin__.open = self._open - self._active = True - return func() - finally: - self._active = False - __builtin__.open = _open - __builtin__.file = _file - self._copy(_os) - - def _mk_dual_path_wrapper(name): - original = getattr(_os,name) - def wrap(self,src,dst,*args,**kw): - if self._active: - src,dst = self._remap_pair(name,src,dst,*args,**kw) - return original(src,dst,*args,**kw) - return wrap - - for name in ["rename", "link", "symlink"]: - if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name) - - - def _mk_single_path_wrapper(name, original=None): - original = original or getattr(_os,name) - def wrap(self,path,*args,**kw): - if self._active: - path = self._remap_input(name,path,*args,**kw) - return original(path,*args,**kw) - return wrap - - _open = _mk_single_path_wrapper('open', _open) - _file = _mk_single_path_wrapper('file', _file) - for name in [ - "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir", - "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", - "startfile", "mkfifo", "mknod", "pathconf", "access" - ]: - if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name) - - def _mk_single_with_return(name): - original = getattr(_os,name) - def wrap(self,path,*args,**kw): - if self._active: - path = self._remap_input(name,path,*args,**kw) - return self._remap_output(name, original(path,*args,**kw)) - return original(path,*args,**kw) - return wrap - - for name in ['readlink', 'tempnam']: - if hasattr(_os,name): locals()[name] = _mk_single_with_return(name) - - def _mk_query(name): - original = getattr(_os,name) - def wrap(self,*args,**kw): - retval = original(*args,**kw) - if self._active: - return self._remap_output(name, retval) - return retval - return wrap - - for name in ['getcwd', 'tmpnam']: - if hasattr(_os,name): locals()[name] = _mk_query(name) - - def _validate_path(self,path): - """Called to remap or validate any path, whether input or output""" - return path - - def _remap_input(self,operation,path,*args,**kw): - """Called for path inputs""" - return self._validate_path(path) - - def _remap_output(self,operation,path): - """Called for path 
outputs""" - return self._validate_path(path) - - def _remap_pair(self,operation,src,dst,*args,**kw): - """Called for path pairs like rename, link, and symlink operations""" - return ( - self._remap_input(operation+'-from',src,*args,**kw), - self._remap_input(operation+'-to',dst,*args,**kw) - ) - - -class DirectorySandbox(AbstractSandbox): - """Restrict operations to a single subdirectory - pseudo-chroot""" - - write_ops = dict.fromkeys([ - "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", - "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam", - ]) - - def __init__(self,sandbox): - self._sandbox = os.path.normcase(os.path.realpath(sandbox)) - self._prefix = os.path.join(self._sandbox,'') - AbstractSandbox.__init__(self) - - def _violation(self, operation, *args, **kw): - raise SandboxViolation(operation, args, kw) - - def _open(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("open", path, mode, *args, **kw) - return _open(path,mode,*args,**kw) - - def tmpnam(self): self._violation("tmpnam") - - def _ok(self,path): - if hasattr(_os,'devnull') and path==_os.devnull: return True - active = self._active - try: - self._active = False - realpath = os.path.normcase(os.path.realpath(path)) - if realpath==self._sandbox or realpath.startswith(self._prefix): - return True - finally: - self._active = active - - def _remap_input(self,operation,path,*args,**kw): - """Called for path inputs""" - if operation in self.write_ops and not self._ok(path): - self._violation(operation, os.path.realpath(path), *args, **kw) - return path - - def _remap_pair(self,operation,src,dst,*args,**kw): - """Called for path pairs like rename, link, and symlink operations""" - if not self._ok(src) or not self._ok(dst): - self._violation(operation, src, dst, *args, **kw) - return (src,dst) - - def _file(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("file", path, mode, *args, **kw) - return _file(path,mode,*args,**kw) - - def open(self, file, flags, mode=0777): - """Called for low-level os.open()""" - if flags & WRITE_FLAGS and not self._ok(file): - self._violation("os.open", file, flags, mode) - return _os.open(file,flags,mode) - -WRITE_FLAGS = reduce( - operator.or_, [getattr(_os, a, 0) for a in - "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()] -) - -class SandboxViolation(DistutilsError): - """A setup script attempted to modify the filesystem outside the sandbox""" - - def __str__(self): - return """SandboxViolation: %s%r %s - -The package setup script has attempted to modify files on your system -that are not within the EasyInstall build area, and has been aborted. - -This package cannot be safely installed by EasyInstall, and may not -support alternate installation locations even if you run its setup -script by hand. 
Please inform the package's author and the EasyInstall -maintainers to find out if a fix or workaround is available.""" % self.args - - - - - - - - - - - - - - - - - - - - - - - - - - - -# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/site-patch.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/site-patch.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/site-patch.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/site-patch.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -def __boot(): - import sys, imp, os, os.path - PYTHONPATH = os.environ.get('PYTHONPATH') - if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH): - PYTHONPATH = [] - else: - PYTHONPATH = PYTHONPATH.split(os.pathsep) - - pic = getattr(sys,'path_importer_cache',{}) - stdpath = sys.path[len(PYTHONPATH):] - mydir = os.path.dirname(__file__) - #print "searching",stdpath,sys.path - - for item in stdpath: - if item==mydir or not item: - continue # skip if current dir. on Windows, or my own directory - importer = pic.get(item) - if importer is not None: - loader = importer.find_module('site') - if loader is not None: - # This should actually reload the current module - loader.load_module('site') - break - else: - try: - stream, path, descr = imp.find_module('site',[item]) - except ImportError: - continue - if stream is None: - continue - try: - # This should actually reload the current module - imp.load_module('site',stream,path,descr) - finally: - stream.close() - break - else: - raise ImportError("Couldn't find the real 'site' module") - - #print "loaded", __file__ - - known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp - - oldpos = getattr(sys,'__egginsert',0) # save old insertion position - sys.__egginsert = 0 # and reset the current one - - for item in PYTHONPATH: - addsitedir(item) - - sys.__egginsert += oldpos # restore effective old position - - d,nd = makepath(stdpath[0]) - insert_at = None - new_path = [] - - for item in sys.path: - p,np = makepath(item) - - if np==nd and insert_at is None: - # We've hit the first 'system' path entry, so added entries go here - insert_at = len(new_path) - - if np in known_paths or insert_at is None: - new_path.append(item) - else: - # new path after the insert point, back-insert it - new_path.insert(insert_at, item) - insert_at += 1 - - sys.path[:] = new_path - -if __name__=='site': - __boot() - del __boot diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/__init__.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/__init__.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,369 +0,0 @@ -"""Tests for the 'setuptools' package""" -from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader -import distutils.core, distutils.cmd -from distutils.errors import DistutilsOptionError, DistutilsPlatformError -from distutils.errors import DistutilsSetupError -import setuptools, setuptools.dist -from setuptools import Feature -from distutils.core import Extension -extract_constant, get_module_constant = None, None -from setuptools.depends import * -from distutils.version import StrictVersion, LooseVersion -from distutils.util import convert_path -import sys, os.path - -def additional_tests(): - import doctest, unittest - suite = 
unittest.TestSuite(( - doctest.DocFileSuite('api_tests.txt', - optionflags=doctest.ELLIPSIS, package=__name__, - ), - )) - if sys.platform == 'win32': - suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt')) - return suite - -def makeSetup(**args): - """Return distribution from 'setup(**args)', without executing commands""" - - distutils.core._setup_stop_after = "commandline" - - # Don't let system command line leak into tests! - args.setdefault('script_args',['install']) - - try: - return setuptools.setup(**args) - finally: - distutils.core_setup_stop_after = None - - - - -class DependsTests(TestCase): - - def testExtractConst(self): - if not extract_constant: return # skip on non-bytecode platforms - - def f1(): - global x,y,z - x = "test" - y = z - - # unrecognized name - self.assertEqual(extract_constant(f1.func_code,'q', -1), None) - - # constant assigned - self.assertEqual(extract_constant(f1.func_code,'x', -1), "test") - - # expression assigned - self.assertEqual(extract_constant(f1.func_code,'y', -1), -1) - - # recognized name, not assigned - self.assertEqual(extract_constant(f1.func_code,'z', -1), None) - - - def testFindModule(self): - self.assertRaises(ImportError, find_module, 'no-such.-thing') - self.assertRaises(ImportError, find_module, 'setuptools.non-existent') - f,p,i = find_module('setuptools.tests'); f.close() - - def testModuleExtract(self): - if not get_module_constant: return # skip on non-bytecode platforms - from distutils import __version__ - self.assertEqual( - get_module_constant('distutils','__version__'), __version__ - ) - self.assertEqual( - get_module_constant('sys','version'), sys.version - ) - self.assertEqual( - get_module_constant('setuptools.tests','__doc__'),__doc__ - ) - - def testRequire(self): - if not extract_constant: return # skip on non-bytecode platforms - - req = Require('Distutils','1.0.3','distutils') - - self.assertEqual(req.name, 'Distutils') - self.assertEqual(req.module, 'distutils') - self.assertEqual(req.requested_version, '1.0.3') - self.assertEqual(req.attribute, '__version__') - self.assertEqual(req.full_name(), 'Distutils-1.0.3') - - from distutils import __version__ - self.assertEqual(req.get_version(), __version__) - self.failUnless(req.version_ok('1.0.9')) - self.failIf(req.version_ok('0.9.1')) - self.failIf(req.version_ok('unknown')) - - self.failUnless(req.is_present()) - self.failUnless(req.is_current()) - - req = Require('Distutils 3000','03000','distutils',format=LooseVersion) - self.failUnless(req.is_present()) - self.failIf(req.is_current()) - self.failIf(req.version_ok('unknown')) - - req = Require('Do-what-I-mean','1.0','d-w-i-m') - self.failIf(req.is_present()) - self.failIf(req.is_current()) - - req = Require('Tests', None, 'tests', homepage="http://example.com") - self.assertEqual(req.format, None) - self.assertEqual(req.attribute, None) - self.assertEqual(req.requested_version, None) - self.assertEqual(req.full_name(), 'Tests') - self.assertEqual(req.homepage, 'http://example.com') - - paths = [os.path.dirname(p) for p in __path__] - self.failUnless(req.is_present(paths)) - self.failUnless(req.is_current(paths)) - - -class DistroTests(TestCase): - - def setUp(self): - self.e1 = Extension('bar.ext',['bar.c']) - self.e2 = Extension('c.y', ['y.c']) - - self.dist = makeSetup( - packages=['a', 'a.b', 'a.b.c', 'b', 'c'], - py_modules=['b.d','x'], - ext_modules = (self.e1, self.e2), - package_dir = {}, - ) - - - def testDistroType(self): - self.failUnless(isinstance(self.dist,setuptools.dist.Distribution)) - - - def 
testExcludePackage(self): - self.dist.exclude_package('a') - self.assertEqual(self.dist.packages, ['b','c']) - - self.dist.exclude_package('b') - self.assertEqual(self.dist.packages, ['c']) - self.assertEqual(self.dist.py_modules, ['x']) - self.assertEqual(self.dist.ext_modules, [self.e1, self.e2]) - - self.dist.exclude_package('c') - self.assertEqual(self.dist.packages, []) - self.assertEqual(self.dist.py_modules, ['x']) - self.assertEqual(self.dist.ext_modules, [self.e1]) - - # test removals from unspecified options - makeSetup().exclude_package('x') - - - - - - - - def testIncludeExclude(self): - # remove an extension - self.dist.exclude(ext_modules=[self.e1]) - self.assertEqual(self.dist.ext_modules, [self.e2]) - - # add it back in - self.dist.include(ext_modules=[self.e1]) - self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) - - # should not add duplicate - self.dist.include(ext_modules=[self.e1]) - self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) - - def testExcludePackages(self): - self.dist.exclude(packages=['c','b','a']) - self.assertEqual(self.dist.packages, []) - self.assertEqual(self.dist.py_modules, ['x']) - self.assertEqual(self.dist.ext_modules, [self.e1]) - - def testEmpty(self): - dist = makeSetup() - dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) - dist = makeSetup() - dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) - - def testContents(self): - self.failUnless(self.dist.has_contents_for('a')) - self.dist.exclude_package('a') - self.failIf(self.dist.has_contents_for('a')) - - self.failUnless(self.dist.has_contents_for('b')) - self.dist.exclude_package('b') - self.failIf(self.dist.has_contents_for('b')) - - self.failUnless(self.dist.has_contents_for('c')) - self.dist.exclude_package('c') - self.failIf(self.dist.has_contents_for('c')) - - - - - def testInvalidIncludeExclude(self): - self.assertRaises(DistutilsSetupError, - self.dist.include, nonexistent_option='x' - ) - self.assertRaises(DistutilsSetupError, - self.dist.exclude, nonexistent_option='x' - ) - self.assertRaises(DistutilsSetupError, - self.dist.include, packages={'x':'y'} - ) - self.assertRaises(DistutilsSetupError, - self.dist.exclude, packages={'x':'y'} - ) - self.assertRaises(DistutilsSetupError, - self.dist.include, ext_modules={'x':'y'} - ) - self.assertRaises(DistutilsSetupError, - self.dist.exclude, ext_modules={'x':'y'} - ) - - self.assertRaises(DistutilsSetupError, - self.dist.include, package_dir=['q'] - ) - self.assertRaises(DistutilsSetupError, - self.dist.exclude, package_dir=['q'] - ) - - - - - - - - - - - - - - - -class FeatureTests(TestCase): - - def setUp(self): - self.req = Require('Distutils','1.0.3','distutils') - self.dist = makeSetup( - features={ - 'foo': Feature("foo",standard=True,require_features=['baz',self.req]), - 'bar': Feature("bar", standard=True, packages=['pkg.bar'], - py_modules=['bar_et'], remove=['bar.ext'], - ), - 'baz': Feature( - "baz", optional=False, packages=['pkg.baz'], - scripts = ['scripts/baz_it'], - libraries=[('libfoo','foo/foofoo.c')] - ), - 'dwim': Feature("DWIM", available=False, remove='bazish'), - }, - script_args=['--without-bar', 'install'], - packages = ['pkg.bar', 'pkg.foo'], - py_modules = ['bar_et', 'bazish'], - ext_modules = [Extension('bar.ext',['bar.c'])] - ) - - def testDefaults(self): - self.failIf( - Feature( - "test",standard=True,remove='x',available=False - ).include_by_default() - ) - self.failUnless( - Feature("test",standard=True,remove='x').include_by_default() - ) - # 
Feature must have either kwargs, removes, or require_features - self.assertRaises(DistutilsSetupError, Feature, "test") - - def testAvailability(self): - self.assertRaises( - DistutilsPlatformError, - self.dist.features['dwim'].include_in, self.dist - ) - - def testFeatureOptions(self): - dist = self.dist - self.failUnless( - ('with-dwim',None,'include DWIM') in dist.feature_options - ) - self.failUnless( - ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options - ) - self.failUnless( - ('with-bar',None,'include bar (default)') in dist.feature_options - ) - self.failUnless( - ('without-bar',None,'exclude bar') in dist.feature_options - ) - self.assertEqual(dist.feature_negopt['without-foo'],'with-foo') - self.assertEqual(dist.feature_negopt['without-bar'],'with-bar') - self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim') - self.failIf('without-baz' in dist.feature_negopt) - - def testUseFeatures(self): - dist = self.dist - self.assertEqual(dist.with_foo,1) - self.assertEqual(dist.with_bar,0) - self.assertEqual(dist.with_baz,1) - self.failIf('bar_et' in dist.py_modules) - self.failIf('pkg.bar' in dist.packages) - self.failUnless('pkg.baz' in dist.packages) - self.failUnless('scripts/baz_it' in dist.scripts) - self.failUnless(('libfoo','foo/foofoo.c') in dist.libraries) - self.assertEqual(dist.ext_modules,[]) - self.assertEqual(dist.require_features, [self.req]) - - # If we ask for bar, it should fail because we explicitly disabled - # it on the command line - self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar') - - def testFeatureWithInvalidRemove(self): - self.assertRaises( - SystemExit, makeSetup, features = {'x':Feature('x', remove='y')} - ) - -class TestCommandTests(TestCase): - - def testTestIsCommand(self): - test_cmd = makeSetup().get_command_obj('test') - self.failUnless(isinstance(test_cmd, distutils.cmd.Command)) - - def testLongOptSuiteWNoDefault(self): - ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite']) - ts1 = ts1.get_command_obj('test') - ts1.ensure_finalized() - self.assertEqual(ts1.test_suite, 'foo.tests.suite') - - def testDefaultSuite(self): - ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test') - ts2.ensure_finalized() - self.assertEqual(ts2.test_suite, 'bar.tests.suite') - - def testDefaultWModuleOnCmdLine(self): - ts3 = makeSetup( - test_suite='bar.tests', - script_args=['test','-m','foo.tests'] - ).get_command_obj('test') - ts3.ensure_finalized() - self.assertEqual(ts3.test_module, 'foo.tests') - self.assertEqual(ts3.test_suite, 'foo.tests.test_suite') - - def testConflictingOptions(self): - ts4 = makeSetup( - script_args=['test','-m','bar.tests', '-s','foo.tests.suite'] - ).get_command_obj('test') - self.assertRaises(DistutilsOptionError, ts4.ensure_finalized) - - def testNoSuite(self): - ts5 = makeSetup().get_command_obj('test') - ts5.ensure_finalized() - self.assertEqual(ts5.test_suite, None) - - - - - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/test_packageindex.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/test_packageindex.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/test_packageindex.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/test_packageindex.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -"""Package Index Tests -""" -# More would be better! 
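(As a quick orientation to the API exercised by the deleted test file below: a minimal usage sketch of the PackageIndex host check, assuming this egg's setuptools.package_index is importable under the Python 2 of that era.)

    import setuptools.package_index

    # Only www.example.com is an allowed download host here, but
    # file: URLs are always acceptable; this is exactly what the
    # deleted test_url_ok below asserts.
    index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
    assert index.url_ok('file:///tmp/test_package_index', True)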
- -import os, shutil, tempfile, unittest, urllib2 -import pkg_resources -import setuptools.package_index - -class TestPackageIndex(unittest.TestCase): - - def test_bad_urls(self): - index = setuptools.package_index.PackageIndex() - url = 'http://127.0.0.1/nonesuch/test_package_index' - try: - v = index.open_url(url) - except Exception, v: - self.assert_(url in str(v)) - else: - self.assert_(isinstance(v,urllib2.HTTPError)) - - def test_url_ok(self): - index = setuptools.package_index.PackageIndex( - hosts=('www.example.com',) - ) - url = 'file:///tmp/test_package_index' - self.assert_(index.url_ok(url, True)) - diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/test_resources.py tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/test_resources.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev3.egg/setuptools/tests/test_resources.py 2012-05-14 02:07:19.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev3.egg/setuptools/tests/test_resources.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,533 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove -from unittest import TestCase, makeSuite; from pkg_resources import * -from setuptools.command.easy_install import get_script_header, is_sh -import os, pkg_resources, sys, StringIO -try: frozenset -except NameError: - from sets import ImmutableSet as frozenset - -class Metadata(EmptyProvider): - """Mock object to return metadata as if from an on-disk distribution""" - - def __init__(self,*pairs): - self.metadata = dict(pairs) - - def has_metadata(self,name): - return name in self.metadata - - def get_metadata(self,name): - return self.metadata[name] - - def get_metadata_lines(self,name): - return yield_lines(self.get_metadata(name)) - -class DistroTests(TestCase): - - def testCollection(self): - # empty path should produce no distributions - ad = Environment([], platform=None, python=None) - self.assertEqual(list(ad), []) - self.assertEqual(ad['FooPkg'],[]) - ad.add(Distribution.from_filename("FooPkg-1.3_1.egg")) - ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")) - ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg")) - - # Name is in there now - self.failUnless(ad['FooPkg']) - # But only 1 package - self.assertEqual(list(ad), ['foopkg']) - - # Distributions sort by version - self.assertEqual( - [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2'] - ) - # Removing a distribution leaves sequence alone - ad.remove(ad['FooPkg'][1]) - self.assertEqual( - [dist.version for dist in ad['FooPkg']], ['1.4','1.2'] - ) - # And inserting adds them in order - ad.add(Distribution.from_filename("FooPkg-1.9.egg")) - self.assertEqual( - [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2'] - ) - - ws = WorkingSet([]) - foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg") - foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg") - req, = parse_requirements("FooPkg>=1.3") - - # Nominal case: no distros on path, should yield all applicable - self.assertEqual(ad.best_match(req,ws).version, '1.9') - # If a matching distro is already installed, should return only that - ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4') - - # If the first matching distro is unsuitable, it's a version conflict - ws = WorkingSet([]); ws.add(foo12); ws.add(foo14) - self.assertRaises(VersionConflict, ad.best_match, req, ws) - - # If more than one match on the path, the first one takes precedence - ws = WorkingSet([]); 
ws.add(foo14); ws.add(foo12); ws.add(foo14); - self.assertEqual(ad.best_match(req,ws).version, '1.4') - - def checkFooPkg(self,d): - self.assertEqual(d.project_name, "FooPkg") - self.assertEqual(d.key, "foopkg") - self.assertEqual(d.version, "1.3-1") - self.assertEqual(d.py_version, "2.4") - self.assertEqual(d.platform, "win32") - self.assertEqual(d.parsed_version, parse_version("1.3-1")) - - def testDistroBasics(self): - d = Distribution( - "/some/path", - project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" - ) - self.checkFooPkg(d) - - d = Distribution("/some/path") - self.assertEqual(d.py_version, sys.version[:3]) - self.assertEqual(d.platform, None) - - def testDistroParse(self): - d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg") - self.checkFooPkg(d) - d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info") - self.checkFooPkg(d) - - def testDistroMetadata(self): - d = Distribution( - "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", - metadata = Metadata( - ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") - ) - ) - self.checkFooPkg(d) - - - def distRequires(self, txt): - return Distribution("/foo", metadata=Metadata(('depends.txt', txt))) - - def checkRequires(self, dist, txt, extras=()): - self.assertEqual( - list(dist.requires(extras)), - list(parse_requirements(txt)) - ) - - def testDistroDependsSimple(self): - for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0": - self.checkRequires(self.distRequires(v), v) - - - def testResolve(self): - ad = Environment([]); ws = WorkingSet([]) - # Resolving no requirements -> nothing to install - self.assertEqual( list(ws.resolve([],ad)), [] ) - # Request something not in the collection -> DistributionNotFound - self.assertRaises( - DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad - ) - Foo = Distribution.from_filename( - "/foo_dir/Foo-1.2.egg", - metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0")) - ) - ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg")) - - # Request thing(s) that are available -> list to activate - for i in range(3): - targets = list(ws.resolve(parse_requirements("Foo"), ad)) - self.assertEqual(targets, [Foo]) - map(ws.add,targets) - self.assertRaises(VersionConflict, ws.resolve, - parse_requirements("Foo==0.9"), ad) - ws = WorkingSet([]) # reset - - # Request an extra that causes an unresolved dependency for "Baz" - self.assertRaises( - DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad - ) - Baz = Distribution.from_filename( - "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo")) - ) - ad.add(Baz) - - # Activation list now includes resolved dependency - self.assertEqual( - list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz] - ) - # Requests for conflicting versions produce VersionConflict - self.assertRaises( VersionConflict, - ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad - ) - - def testDistroDependsOptions(self): - d = self.distRequires(""" - Twisted>=1.5 - [docgen] - ZConfig>=2.0 - docutils>=0.3 - [fastcgi] - fcgiapp>=0.1""") - self.checkRequires(d,"Twisted>=1.5") - self.checkRequires( - d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] - ) - self.checkRequires( - d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] - ) - self.checkRequires( - d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), - ["docgen","fastcgi"] - ) - self.checkRequires( - d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), - ["fastcgi", "docgen"] - ) - 
self.assertRaises(UnknownExtra, d.requires, ["foo"]) - - - - - - - - - - - - - - - - - -class EntryPointTests(TestCase): - - def assertfields(self, ep): - self.assertEqual(ep.name,"foo") - self.assertEqual(ep.module_name,"setuptools.tests.test_resources") - self.assertEqual(ep.attrs, ("EntryPointTests",)) - self.assertEqual(ep.extras, ("x",)) - self.failUnless(ep.load() is EntryPointTests) - self.assertEqual( - str(ep), - "foo = setuptools.tests.test_resources:EntryPointTests [x]" - ) - - def setUp(self): - self.dist = Distribution.from_filename( - "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) - - def testBasics(self): - ep = EntryPoint( - "foo", "setuptools.tests.test_resources", ["EntryPointTests"], - ["x"], self.dist - ) - self.assertfields(ep) - - def testParse(self): - s = "foo = setuptools.tests.test_resources:EntryPointTests [x]" - ep = EntryPoint.parse(s, self.dist) - self.assertfields(ep) - - ep = EntryPoint.parse("bar baz= spammity[PING]") - self.assertEqual(ep.name,"bar baz") - self.assertEqual(ep.module_name,"spammity") - self.assertEqual(ep.attrs, ()) - self.assertEqual(ep.extras, ("ping",)) - - ep = EntryPoint.parse(" fizzly = wocka:foo") - self.assertEqual(ep.name,"fizzly") - self.assertEqual(ep.module_name,"wocka") - self.assertEqual(ep.attrs, ("foo",)) - self.assertEqual(ep.extras, ()) - - def testRejects(self): - for ep in [ - "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2", - ]: - try: EntryPoint.parse(ep) - except ValueError: pass - else: raise AssertionError("Should've been bad", ep) - - def checkSubMap(self, m): - self.assertEqual(len(m), len(self.submap_expect)) - for key, ep in self.submap_expect.iteritems(): - self.assertEqual(repr(m.get(key)), repr(ep)) - - submap_expect = dict( - feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), - feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), - feature3=EntryPoint('feature3', 'this.module', extras=['something']) - ) - submap_str = """ - # define features for blah blah - feature1 = somemodule:somefunction - feature2 = another.module:SomeClass [extra1,extra2] - feature3 = this.module [something] - """ - - def testParseList(self): - self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str)) - self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar") - self.assertRaises(ValueError, EntryPoint.parse_group, "x", - ["foo=baz", "foo=bar"]) - - def testParseMap(self): - m = EntryPoint.parse_map({'xyz':self.submap_str}) - self.checkSubMap(m['xyz']) - self.assertEqual(m.keys(),['xyz']) - m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) - self.checkSubMap(m['xyz']) - self.assertEqual(m.keys(),['xyz']) - self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"]) - self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str) - -class RequirementsTests(TestCase): - - def testBasics(self): - r = Requirement.parse("Twisted>=1.2") - self.assertEqual(str(r),"Twisted>=1.2") - self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')") - self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ())) - self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ())) - self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ())) - self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ())) - self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ())) - self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2")) - - def testOrdering(self): - r1 = Requirement("Twisted", 
[('==','1.2c1'),('>=','1.2')], ()) - r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ()) - self.assertEqual(r1,r2) - self.assertEqual(str(r1),str(r2)) - self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2") - - def testBasicContains(self): - r = Requirement("Twisted", [('>=','1.2')], ()) - foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg") - twist11 = Distribution.from_filename("Twisted-1.1.egg") - twist12 = Distribution.from_filename("Twisted-1.2.egg") - self.failUnless(parse_version('1.2') in r) - self.failUnless(parse_version('1.1') not in r) - self.failUnless('1.2' in r) - self.failUnless('1.1' not in r) - self.failUnless(foo_dist not in r) - self.failUnless(twist11 not in r) - self.failUnless(twist12 in r) - - def testAdvancedContains(self): - r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5") - for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'): - self.failUnless(v in r, (v,r)) - for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'): - self.failUnless(v not in r, (v,r)) - - - def testOptionsAndHashing(self): - r1 = Requirement.parse("Twisted[foo,bar]>=1.2") - r2 = Requirement.parse("Twisted[bar,FOO]>=1.2") - r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0") - self.assertEqual(r1,r2) - self.assertEqual(r1,r3) - self.assertEqual(r1.extras, ("foo","bar")) - self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized - self.assertEqual(hash(r1), hash(r2)) - self.assertEqual( - hash(r1), hash(("twisted", ((">=",parse_version("1.2")),), - frozenset(["foo","bar"]))) - ) - - def testVersionEquality(self): - r1 = Requirement.parse("setuptools==0.3a2") - r2 = Requirement.parse("setuptools!=0.3a4") - d = Distribution.from_filename - - self.failIf(d("setuptools-0.3a4.egg") in r1) - self.failIf(d("setuptools-0.3a1.egg") in r1) - self.failIf(d("setuptools-0.3a4.egg") in r2) - - self.failUnless(d("setuptools-0.3a2.egg") in r1) - self.failUnless(d("setuptools-0.3a2.egg") in r2) - self.failUnless(d("setuptools-0.3a3.egg") in r2) - self.failUnless(d("setuptools-0.3a5.egg") in r2) - - - - - - - - - - - - - - -class ParseTests(TestCase): - - def testEmptyParse(self): - self.assertEqual(list(parse_requirements('')), []) - - def testYielding(self): - for inp,out in [ - ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), - (['x\n\n','y'], ['x','y']), - ]: - self.assertEqual(list(pkg_resources.yield_lines(inp)),out) - - def testSplitting(self): - self.assertEqual( - list( - pkg_resources.split_sections(""" - x - [Y] - z - - a - [b ] - # foo - c - [ d] - [q] - v - """ - ) - ), - [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])] - ) - self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo")) - - def testSafeName(self): - self.assertEqual(safe_name("adns-python"), "adns-python") - self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") - self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") - self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker") - self.assertNotEqual(safe_name("peak.web"), "peak-web") - - def testSafeVersion(self): - self.assertEqual(safe_version("1.2-1"), "1.2-1") - self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha") - self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521") - self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker") - self.assertEqual(safe_version("peak.web"), "peak.web") - - def testSimpleRequirements(self): - self.assertEqual( - list(parse_requirements('Twis-Ted>=1.2-1')), - [Requirement('Twis-Ted',[('>=','1.2-1')], ())] - ) - 
self.assertEqual( - list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')), - [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())] - ) - self.assertEqual( - Requirement.parse("FooBar==1.99a3"), - Requirement("FooBar", [('==','1.99a3')], ()) - ) - self.assertRaises(ValueError,Requirement.parse,">=2.3") - self.assertRaises(ValueError,Requirement.parse,"x\\") - self.assertRaises(ValueError,Requirement.parse,"x==2 q") - self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2") - self.assertRaises(ValueError,Requirement.parse,"#") - - def testVersionEquality(self): - def c(s1,s2): - p1, p2 = parse_version(s1),parse_version(s2) - self.assertEqual(p1,p2, (s1,s2,p1,p2)) - - c('1.2-rc1', '1.2rc1') - c('0.4', '0.4.0') - c('0.4.0.0', '0.4.0') - c('0.4.0-0', '0.4-0') - c('0pl1', '0.0pl1') - c('0pre1', '0.0c1') - c('0.0.0preview1', '0c1') - c('0.0c1', '0-rc1') - c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a') - - def testVersionOrdering(self): - def c(s1,s2): - p1, p2 = parse_version(s1),parse_version(s2) - self.failUnless(p1 - "easy_install will install a package that is already there" - - - "be more like distutils with regard to --prefix=" - - - "respect the PYTHONPATH" - (Note: this patch does not work as intended when site.py has been modified. - This will be fixed in a future version.) - - - * The following patch to setuptools introduced bugs, and has been reverted - in zetuptoolz: - - $ svn log -r 45514 - ------------------------------------------------------------------------ - r45514 | phillip.eby | 2006-04-18 04:03:16 +0100 (Tue, 18 Apr 2006) | 9 lines - - Backport pkgutil, pydoc, and doctest from the 2.5 trunk to setuptools - 0.7 trunk. (Sideport?) Setuptools 0.7 will install these in place of - the 2.3/2.4 versions (at least of pydoc and doctest) to let them work - properly with eggs. pkg_resources now depends on the 2.5 pkgutil, which - is included here as _pkgutil, to work around the fact that some system - packagers will install setuptools without overriding the stdlib modules. - But users who install their own setuptools will get them, and the system - packaged people probably don't need them. - ------------------------------------------------------------------------ - - - * If unpatched setuptools decides that it needs to change an existing site.py - file that appears not to have been written by it (because the file does not - start with "def __boot():"), it aborts the installation. - zetuptoolz leaves the file alone and outputs a warning, but continues with - the installation. - - - * The scripts written by zetuptoolz have the following extra line: - - # generated by zetuptoolz - - after the header. - - - * Windows-specific changes (native Python): - - Python distributions may have command-line or GUI scripts. - On Windows, setuptools creates an executable wrapper to run each - script. zetuptools uses a different approach that does not require - an .exe wrapper. It writes approximately the same script file that - is used on other platforms, but with a .pyscript extension. - It also writes a shell-script wrapper (without any extension) that - is only used when the command is run from a Cygwin shell. - - Some of the advantages of this approach are: - - * Unicode arguments are preserved (although the program will - need to use some Windows-specific code to get at them in - current versions of Python); - * it works correctly on 64-bit Windows; - * the zetuptoolz distribution need not contain either any - binary executables, or any C code that needs to be compiled. 
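(To make the wrapper arrangement above concrete: a rough sketch of the two files written for a hypothetical console script named "tahoe". The real writer lives in setuptools/command/easy_install.py and its exact output differs; the Cygwin wrapper contents here are an assumption.)

    import os

    def write_windows_script(target_dir, name, body):
        """Sketch: emit NAME.pyscript plus a Cygwin-only wrapper NAME."""
        # NAME.pyscript carries the same Python script used on other
        # platforms, with the marker line zetuptoolz adds after the header.
        with open(os.path.join(target_dir, name + '.pyscript'), 'w') as f:
            f.write('#!python\n# generated by zetuptoolz\n')
            f.write(body)
        # NAME is a plain shell script, consulted only when the command
        # is invoked from a Cygwin shell.
        with open(os.path.join(target_dir, name), 'w') as f:
            f.write('#!/bin/sh\n')
            f.write('exec python "`dirname "$0"`/%s.pyscript" "$@"\n' % name)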
- - See setuptools\tests\win_script_wrapper.txt for further details. - - Installing or building any distribution on Windows will automatically - associate .pyscript with the native Python interpreter for the current - user. It will also add .pyscript and .pyw to the PATHEXT variable for - the current user, which is needed to allow scripts to be run without - typing any extension. - - There is an additional setup.py command that can be used to perform - these steps separately (which isn't normally needed, but might be - useful for debugging): - - python setup.py scriptsetup - - Adding the --allusers option, i.e. - - python setup.py scriptsetup --allusers - - will make the .pyscript association and changes to the PATHEXT variable - for all users of this Windows installation, except those that have it - overridden in their per-user environment. In this case setup.py must be - run with Administrator privileges, e.g. from a Command Prompt whose - shortcut has been set to run as Administrator. diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/PKG-INFO tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/PKG-INFO --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/PKG-INFO 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,100 @@ +Metadata-Version: 1.0 +Name: setuptools +Version: 0.6c16dev4 +Summary: Download, build, install, upgrade, and uninstall Python packages -- easily! (zetuptoolz fork) +Home-page: http://pypi.python.org/pypi/setuptools +Author: Phillip J. Eby +Author-email: distutils-sig@python.org +License: PSF or ZPL +Description: ====================== + This is not Setuptools + ====================== + + This is the ``zetuptoolz`` fork of setuptools, which is used to install + `Tahoe-LAFS`_. It has a `darcs source repository`_ and `issue tracker`_. + + For a list of differences between this fork and setuptools, see zetuptoolz.txt. + + Note that, to avoid interfering with any setuptools installation, zetuptoolz + does not install a script called ``easy_install``. There is an ``easy_install_z`` + script, but that is intended only for developers to test differences between + setuptools and zetuptoolz. + + .. _Tahoe-LAFS: http://tahoe-lafs.org/ + .. _darcs source repository: http://tahoe-lafs.org/source/zetuptoolz/trunk + .. _issue tracker: http://tahoe-lafs.org/trac/zetuptoolz + + + -------------------------------- + Using Setuptools and EasyInstall + -------------------------------- + + Here are some of the available manuals, tutorials, and other resources for + learning about Setuptools, Python Eggs, and EasyInstall: + + * `The EasyInstall user's guide and reference manual`_ + * `The setuptools Developer's Guide`_ + * `The pkg_resources API reference`_ + * `Package Compatibility Notes`_ (user-maintained) + * `The Internal Structure of Python Eggs`_ + + Questions, comments, and bug reports should be directed to the `distutils-sig + mailing list`_. If you have written (or know of) any tutorials, documentation, + plug-ins, or other resources for setuptools users, please let us know about + them there, so this reference list can be updated. If you have working, + *tested* patches to correct problems or add features, you may submit them to + the `setuptools bug tracker`_. + + .. _setuptools bug tracker: http://bugs.python.org/setuptools/ + .. _Package Compatibility Notes: http://peak.telecommunity.com/DevCenter/PackageNotes + .. 
_The Internal Structure of Python Eggs: http://peak.telecommunity.com/DevCenter/EggFormats + .. _The setuptools Developer's Guide: http://peak.telecommunity.com/DevCenter/setuptools + .. _The pkg_resources API reference: http://peak.telecommunity.com/DevCenter/PkgResources + .. _The EasyInstall user's guide and reference manual: http://peak.telecommunity.com/DevCenter/EasyInstall + .. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ + + + ------- + Credits + ------- + + * The original design for the ``.egg`` format and the ``pkg_resources`` API was + co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first + version of ``pkg_resources``, and supplied the OS X operating system version + compatibility algorithm. + + * Ian Bicking implemented many early "creature comfort" features of + easy_install, including support for downloading via Sourceforge and + Subversion repositories. Ian's comments on the Web-SIG about WSGI + application deployment also inspired the concept of "entry points" in eggs, + and he has given talks at PyCon and elsewhere to inform and educate the + community about eggs and setuptools. + + * Jim Fulton contributed time and effort to build automated tests of various + aspects of ``easy_install``, and supplied the doctests for the command-line + ``.exe`` wrappers on Windows. + + * Phillip J. Eby is the principal author and maintainer of setuptools, and + first proposed the idea of an importable binary distribution format for + Python application plug-ins. + + * Significant parts of the implementation of setuptools were funded by the Open + Source Applications Foundation, to provide a plug-in infrastructure for the + Chandler PIM application. In addition, many OSAF staffers (such as Mike + "Code Bear" Taylor) contributed their time and stress as guinea pigs for the + use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) + + .. 
_files: + +Keywords: CPAN PyPI distutils eggs package management +Platform: UNKNOWN +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: License :: OSI Approved :: Zope Public License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Archiving :: Packaging +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/SOURCES.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/SOURCES.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/SOURCES.txt 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,44 @@ +README.txt +easy_install.py +pkg_resources.py +setup.cfg +setup.py +setuptools/__init__.py +setuptools/archive_util.py +setuptools/depends.py +setuptools/dist.py +setuptools/extension.py +setuptools/package_index.py +setuptools/sandbox.py +setuptools/site-patch.py +setuptools.egg-info/PKG-INFO +setuptools.egg-info/SOURCES.txt +setuptools.egg-info/dependency_links.txt +setuptools.egg-info/entry_points.txt +setuptools.egg-info/top_level.txt +setuptools.egg-info/zip-safe +setuptools/command/__init__.py +setuptools/command/alias.py +setuptools/command/bdist_egg.py +setuptools/command/bdist_rpm.py +setuptools/command/bdist_wininst.py +setuptools/command/build_ext.py +setuptools/command/build_py.py +setuptools/command/develop.py +setuptools/command/easy_install.py +setuptools/command/egg_info.py +setuptools/command/install.py +setuptools/command/install_egg_info.py +setuptools/command/install_lib.py +setuptools/command/install_scripts.py +setuptools/command/register.py +setuptools/command/rotate.py +setuptools/command/saveopts.py +setuptools/command/scriptsetup.py +setuptools/command/sdist.py +setuptools/command/setopt.py +setuptools/command/test.py +setuptools/command/upload.py +setuptools/tests/__init__.py +setuptools/tests/test_packageindex.py +setuptools/tests/test_resources.py \ No newline at end of file diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/dependency_links.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/dependency_links.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/dependency_links.txt 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/dependency_links.txt 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/entry_points.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/entry_points.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/entry_points.txt 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,59 @@ +[distutils.commands] +bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm +rotate = setuptools.command.rotate:rotate +develop = setuptools.command.develop:develop +setopt = setuptools.command.setopt:setopt +build_py = setuptools.command.build_py:build_py +scriptsetup = setuptools.command.scriptsetup:scriptsetup +saveopts = setuptools.command.saveopts:saveopts +egg_info = setuptools.command.egg_info:egg_info +register = setuptools.command.register:register 
+install_egg_info = setuptools.command.install_egg_info:install_egg_info +alias = setuptools.command.alias:alias +easy_install = setuptools.command.easy_install:easy_install +install_scripts = setuptools.command.install_scripts:install_scripts +bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst +bdist_egg = setuptools.command.bdist_egg:bdist_egg +install = setuptools.command.install:install +test = setuptools.command.test:test +install_lib = setuptools.command.install_lib:install_lib +build_ext = setuptools.command.build_ext:build_ext +sdist = setuptools.command.sdist:sdist + +[egg_info.writers] +dependency_links.txt = setuptools.command.egg_info:overwrite_arg +requires.txt = setuptools.command.egg_info:write_requirements +PKG-INFO = setuptools.command.egg_info:write_pkg_info +eager_resources.txt = setuptools.command.egg_info:overwrite_arg +top_level.txt = setuptools.command.egg_info:write_toplevel_names +namespace_packages.txt = setuptools.command.egg_info:overwrite_arg +entry_points.txt = setuptools.command.egg_info:write_entries +depends.txt = setuptools.command.egg_info:warn_depends_obsolete + +[console_scripts] +easy_install_z-2.6 = setuptools.command.easy_install:main +easy_install_z = setuptools.command.easy_install:main + +[setuptools.file_finders] +svn_cvs = setuptools.command.sdist:_default_revctrl + +[distutils.setup_keywords] +dependency_links = setuptools.dist:assert_string_list +entry_points = setuptools.dist:check_entry_points +extras_require = setuptools.dist:check_extras +test_runner = setuptools.dist:check_importable +package_data = setuptools.dist:check_package_data +install_requires = setuptools.dist:check_requirements +include_package_data = setuptools.dist:assert_bool +exclude_package_data = setuptools.dist:check_package_data +namespace_packages = setuptools.dist:check_nsp +test_suite = setuptools.dist:check_test_suite +eager_resources = setuptools.dist:assert_string_list +zip_safe = setuptools.dist:assert_bool +test_loader = setuptools.dist:check_importable +packages = setuptools.dist:check_packages +tests_require = setuptools.dist:check_requirements + +[setuptools.installation] +eggsecutable = setuptools.command.easy_install:bootstrap + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/top_level.txt tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/top_level.txt --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/top_level.txt 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/top_level.txt 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,3 @@ +easy_install +pkg_resources +setuptools diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/zip-safe tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/zip-safe --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/EGG-INFO/zip-safe 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/EGG-INFO/zip-safe 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/easy_install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/easy_install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/easy_install.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/easy_install.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/pkg_resources.py 
tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/pkg_resources.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/pkg_resources.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/pkg_resources.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,2656 @@ +"""Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. +""" + +import sys, os, zipimport, time, re, imp + +try: + frozenset +except NameError: + from sets import ImmutableSet as frozenset + +# capture these to bypass sandboxing +from os import utime, rename, unlink, mkdir +from os import open as os_open +from os.path import isdir, split + +def _bypass_ensure_directory(name, mode=0777): + # Sandbox-bypassing version of ensure_directory() + dirname, filename = split(name) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + mkdir(dirname, mode) + + + + + + + + +_state_vars = {} + +def _declare_state(vartype, **kw): + g = globals() + for name, val in kw.iteritems(): + g[name] = val + _state_vars[name] = vartype + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.iteritems(): + state[k] = g['_sget_'+v](g[k]) + return state + +def __setstate__(state): + g = globals() + for k, v in state.iteritems(): + g['_sset_'+_state_vars[k]](k, g[k], v) + return state + +def _sget_dict(val): + return val.copy() + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + +def _sget_object(val): + return val.__getstate__() + +def _sset_object(key, ob, state): + ob.__setstate__(state) + +_sget_none = _sset_none = lambda *args: None + + + + + + +def get_supported_platform(): + """Return this platform's maximum compatible version. + + distutils.util.get_platform() normally reports the minimum version + of Mac OS X that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of Mac OS X that we are *running*. To allow usage of packages that + explicitly require a newer version of Mac OS X, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. 
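(A usage sketch for the function documented above; the printed value is illustrative and depends on the running interpreter and OS.)

    import pkg_resources

    # On a Mac this reports the OS version actually running, e.g.
    # 'macosx-10.6-fat', rather than the build-time minimum; elsewhere
    # it matches distutils.util.get_platform(), e.g. 'linux-x86_64'.
    print(pkg_resources.get_supported_platform())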
+ """ + plat = get_build_platform(); m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + except ValueError: + pass # not Mac OS X + return plat + + + + + + + + + + + + + + + + + + + + + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra', + 'ExtractionError', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] +class ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + def __repr__(self): + return self.__class__.__name__+repr(self.args) + +class VersionConflict(ResolutionError): + """An already-installed version conflicts with the requested version""" + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" + +_provider_factories = {} +PY_MAJOR = sys.version[:3] +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. 
+ """ + _provider_factories[loader_type] = provider_factory + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq,Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + +def _macosx_vers(_cache=[]): + if not _cache: + from platform import mac_ver + _cache.append(mac_ver()[0].split('.')) + return _cache[0] + +def _macosx_arch(machine): + return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine) + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and Mac OS X. + """ + from distutils.util import get_platform + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macosx_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), + _macosx_arch(machine)) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +get_platform = get_build_platform # XXX backward compat + + + + + + + +def compatible_platforms(provided,required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. + """ + if provided is None or required is None or provided==required: + return True # easy case + + # Mac OS X special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macosx designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + + #import warnings + #warnings.warn("Mac eggs should be rebuilt to " + # "use the macosx designation instead of darwin.", + # category=DeprecationWarning) + return True + return False # egg isn't macosx or legacy darwin + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + + + # is the required OS major update >= the provided one? 
+ if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + +run_main = run_script # backward compatibility + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist,basestring): dist = Requirement.parse(dist) + if isinstance(dist,Requirement): dist = get_provider(dist) + if not isinstance(dist,Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? (like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + + + + + + + + + + + + + + + + + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? 
(like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + + + + + + + + + + + + + + +class WorkingSet(object): + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + + def __contains__(self,dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + + + + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. + If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + raise VersionConflict(dist,req) # XXX add more info + else: + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + for dist in self: + entries = dist.get_entry_map(group) + if name is None: + for ep in entries.values(): + yield ep + elif name in entries: + yield entries[name] + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. + """ + seen = {} + for item in self.entries: + for key in self.entry_keys[item]: + if key not in seen: + seen[key]=1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set. 
If it's added, any + callbacks registered with the ``subscribe()`` method will be called. + """ + if insert: + dist.insert_on(self.entries, entry) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry,[]) + keys2 = self.entry_keys.setdefault(dist.location,[]) + if dist.key in self.by_key: + return # ignore hidden distros + + # If we have a __requires__ then we can already tell if this + # dist is unsatisfactory, in which case we won't add it. + if __requires__ is not None: + for thisreqstr in __requires__: + try: + for thisreq in parse_requirements(thisreqstr): + if thisreq.key == dist.key: + if dist not in thisreq: + return + except ValueError, e: + e.args = tuple(e.args + ({'thisreqstr': thisreqstr},)) + raise + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + def resolve(self, requirements, env=None, installer=None): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + """ + + requirements = list(requirements)[::-1] # set up the stack + processed = {} # set of processed requirements + best = {} # key -> dist + to_activate = [] + + while requirements: + req = requirements.pop(0) # process dependencies breadth-first + if req in processed: + # Ignore cyclic or redundant dependencies + continue + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None: + if env is None: + env = Environment(self.entries) + dist = best[req.key] = env.best_match(req, self, installer) + if dist is None: + raise DistributionNotFound(req) # XXX put more info here + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + raise VersionConflict(dist,req) # XXX put more info here + requirements.extend(dist.requires(req.extras)[::-1]) + processed[req] = True + + return to_activate # return list of distros to activate + + def find_plugins(self, + plugin_env, full_env=None, installer=None, fallback=True + ): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + map(working_set.add, distributions) # add plugins+libs to sys.path + print "Couldn't load", errors # display errors + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. The `full_env`, if supplied, should be an ``Environment`` + contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved. 
+ + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. `error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. + """ + + plugin_projects = list(plugin_env) + plugin_projects.sort() # scan project names in alphabetic order + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + map(shadow_set.add, self) # put all our entries in shadow_set + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError,v: + error_info[dist] = v # save error info + if fallback: + continue # try the next older version of project + else: + break # give up on this project, keep going + + else: + map(shadow_set.add, resolvees) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + + + + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. + """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback): + """Invoke `callback` for all distributions (including existing ones)""" + if callback in self.callbacks: + return + self.callbacks.append(callback) + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, (entries, keys, by_key, callbacks)): + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class Environment(object): + """Searchable snapshot of distributions on a search path""" + + def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. + + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'2.4'``); + it defaults to the current version. 
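(A sketch of such a snapshot; the search path is illustrative. Passing platform=None and python=None, as described just below, indexes every distribution found regardless of compatibility.)

    import pkg_resources

    env = pkg_resources.Environment(['plugins'], platform=None, python=None)
    for project in env:  # unique project names found on the search path
        print("%s: %r" % (project, [d.version for d in env[project]]))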
+ + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self._cache = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? + + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + return (self.python is None or dist.py_version is None + or dist.py_version==self.python) \ + and compatible_platforms(dist.platform,self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self,project_name): + """Return a newest-to-oldest list of distributions for `project_name` + """ + try: + return self._cache[project_name] + except KeyError: + project_name = project_name.lower() + if project_name not in self._distmap: + return [] + + if project_name not in self._cache: + dists = self._cache[project_name] = self._distmap[project_name] + _sort_dists(dists) + + return self._cache[project_name] + + def add(self,dist): + """Add `dist` if we ``can_add()`` it and it isn't already added""" + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key,[]) + if dist not in dists: + dists.append(dist) + if dist.key in self._cache: + _sort_dists(self._cache[dist.key]) + + + def best_match(self, req, working_set, installer=None): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) + + If a suitable distribution isn't active, this method returns the + newest platform-dependent distribution in the environment that meets + the ``Requirement`` in `req`. If no suitable platform-dependent + distribution is found, then the newest platform-independent + distribution that meets the requirement is returned. (A platform- + dependent distribution will typically have code compiled or + specialized for that platform.) + + Otherwise, if `installer` is supplied, then the result of calling the + environment's ``obtain(req, installer)`` method will be returned. + """ + dist = working_set.find(req) + if dist is not None: + return dist + + # first try to find a platform-dependent dist + for dist in self[req.key]: + if dist in req and dist.platform is not None: + return dist + + # then try any other dist + for dist in self[req.key]: + if dist in req: + return dist + + return self.obtain(req, installer) # try and download/install + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. 
via download). In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: yield key + + + + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other,Distribution): + self.add(other) + elif isinstance(other,Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +AvailableDistributions = Environment # XXX backward compatibility + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + err = ExtractionError("""Can't extract file(s) to egg cache + +The following error occurred while trying to extract file(s) to the Python egg +cache: + + %s + +The Python egg cache directory is currently set to: + + %s + +Perhaps your account does not have write access to this directory? You can +change the cache directory by setting the PYTHON_EGG_CACHE environment +variable to point to an accessible directory. 
+""" % (old_exc, cache_path) + ) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + + + + + + + + + + + + + + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except: + self.extraction_error() + + self.cached_files[target_path] = 1 + return target_path + + + + + + + + + + + + + + + + + + + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0555) & 07777 + os.chmod(tempname, mode) + + + + + + + + + + + + + + + + + + + + + + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. + + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. 
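
For illustration, the temporary-directory workflow described here might be
wired up as follows (a sketch: using ``tempfile``/``atexit`` is an assumption,
and note that ``cleanup_resources()`` in this copy is still a stub)::

    import atexit, tempfile
    from pkg_resources import ResourceManager

    manager = ResourceManager()
    manager.set_extraction_path(tempfile.mkdtemp())   # private extraction dir
    atexit.register(manager.cleanup_resources)        # best-effort cleanup at exit
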
This method is not
+        automatically called; you must call it explicitly or register it as an
+        ``atexit`` function if you wish to ensure cleanup of a temporary
+        directory used for extractions.
+        """
+        # XXX
+
+
+
+def get_default_cache():
+    """Determine the default cache location
+
+    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
+    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
+    "Application Data" directory. On all other systems, it's "~/.python-eggs".
+    """
+    try:
+        return os.environ['PYTHON_EGG_CACHE']
+    except KeyError:
+        pass
+
+    if os.name!='nt':
+        return os.path.expanduser('~/.python-eggs')
+
+    app_data = 'Application Data'   # XXX this may be locale-specific!
+    app_homes = [
+        (('APPDATA',), None),       # best option, should be locale-safe
+        (('USERPROFILE',), app_data),
+        (('HOMEDRIVE','HOMEPATH'), app_data),
+        (('HOMEPATH',), app_data),
+        (('HOME',), None),
+        (('WINDIR',), app_data),    # 95/98/ME
+    ]
+
+    for keys, subdir in app_homes:
+        dirname = ''
+        for key in keys:
+            if key in os.environ:
+                dirname = os.path.join(dirname, os.environ[key])
+            else:
+                break
+        else:
+            if subdir:
+                dirname = os.path.join(dirname,subdir)
+            return os.path.join(dirname, 'Python-Eggs')
+    else:
+        raise RuntimeError(
+            "Please set the PYTHON_EGG_CACHE environment variable"
+        )
+
+def safe_name(name):
+    """Convert an arbitrary string to a standard distribution name
+
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+    """
+    return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+    """Convert an arbitrary string to a standard version string
+
+    Spaces become dots, and all other non-alphanumeric characters become
+    dashes, with runs of multiple dashes condensed to a single dash.
+    """
+    version = version.replace(' ','.')
+    return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+    """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of non-alphanumeric characters are replaced with a single '_',
+    and the result is always lowercased.
+    """
+    return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
+
+
+def to_filename(name):
+    """Convert a project or version name to its filename-escaped form
+
+    Any '-' characters are currently replaced with '_'.
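
For illustration, what these normalization helpers produce (the example
inputs are arbitrary)::

    from pkg_resources import safe_name, safe_version, safe_extra, to_filename

    safe_name('My Project!!')    # -> 'My-Project-'
    safe_version('1.0 beta 2')   # -> '1.0.beta.2'
    safe_extra('Foo-Bar')        # -> 'foo_bar'
    to_filename('my-project')    # -> 'my_project'
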
+ """ + return name.replace('-','_') + + + + + + + + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return StringIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def has_metadata(self, name): + return self.egg_info and self._has(self._fn(self.egg_info,name)) + + def get_metadata(self, name): + if not self.egg_info: + return "" + return self._get(self._fn(self.egg_info,name)) + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self,resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self,name): + return self.egg_info and self._isdir(self._fn(self.egg_info,name)) + + + def resource_listdir(self,resource_name): + return self._listdir(self._fn(self.module_path,resource_name)) + + def metadata_listdir(self,name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info,name)) + return [] + + def run_script(self,script_name,namespace): + script = 'scripts/'+script_name + if not self.has_metadata(script): + raise ResolutionError("No script named %r" % script_name) + script_text = self.get_metadata(script).replace('\r\n','\n') + script_text = script_text.replace('\r','\n') + script_filename = self._fn(self.egg_info,script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + execfile(script_filename, namespace, namespace) + else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text,script_filename,'exec') + exec script_code in namespace, namespace + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + +register_loader_type(object, NullProvider) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self,module): + NullProvider.__init__(self,module) + self._setup_prefix() + + def _setup_prefix(self): + # we assume here that our metadata may be nested inside a "basket" + # of multiple eggs; that's why we use module_path instead of .archive + path = self.module_path + old = None + while path!=old: + if path.lower().endswith('.egg'): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 
'EGG-INFO') + self.egg_root = path + break + old = path + path, base = os.path.split(path) + + + + + + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self,path): + return os.path.isdir(path) + + def _listdir(self,path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def _get(self, path): + stream = open(path, 'rb') + try: + return stream.read() + finally: + stream.close() + +register_loader_type(type(None), DefaultProvider) + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + _isdir = _has = lambda self,path: False + _get = lambda self,path: '' + _listdir = lambda self,path: [] + module_path = None + + def __init__(self): + pass + +empty_provider = EmptyProvider() + + + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + + def __init__(self, module): + EggProvider.__init__(self,module) + self.zipinfo = zipimport._zip_directory_cache[self.loader.archive] + self.zip_pre = self.loader.archive+os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath,self.zip_pre) + ) + + def _parts(self,zip_path): + # Convert a zipfile subpath into an egg-relative path part list + fspath = self.zip_pre+zip_path # pseudo-fs path + if fspath.startswith(self.egg_root+os.sep): + return fspath[len(self.egg_root)+1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath,self.egg_root) + ) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + def _extract_resource(self, manager, zip_path): + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + return os.path.dirname(last) # return the extracted directory name + + zip_stat = self.zipinfo[zip_path] + t,d,size = zip_stat[5], zip_stat[6], zip_stat[3] + date_time = ( + (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd + (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc. 
+ ) + timestamp = time.mktime(date_time) + + try: + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if os.path.isfile(real_path): + stat = os.stat(real_path) + if stat.st_size==size and stat.st_mtime==timestamp: + # size and stamp match, don't bother extracting + return real_path + + outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) + os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp,timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + stat = os.stat(real_path) + + if stat.st_size==size and stat.st_mtime==timestamp: + # size and stamp match, somebody did it just ahead of + # us, so we're done + return real_path + elif os.name=='nt': # Windows, del old file and retry + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + manager.extraction_error() # report a user-friendly error + + return real_path + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self,fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self,fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self,resource_name): + return self._zipinfo_name(self._fn(self.egg_root,resource_name)) + + def _resource_to_zip(self,resource_name): + return self._zipinfo_name(self._fn(self.module_path,resource_name)) + +register_loader_type(zipimport.zipimporter, ZipProvider) + + + + + + + + + + + + + + + + + + + + + + + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. 
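
For illustration, a sketch of pairing ``FileMetadata`` with a
``Distribution`` (mirroring what ``find_on_path()`` later in this module does
for ``*.egg-info`` files; the path is hypothetical)::

    from pkg_resources import FileMetadata, Distribution

    path = '/path/to/Project-1.0.egg-info'      # a file in PKG-INFO format
    dist = Distribution.from_filename(path, metadata=FileMetadata(path))
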
+    """
+
+    def __init__(self,path):
+        self.path = path
+
+    def has_metadata(self,name):
+        return name=='PKG-INFO'
+
+    def get_metadata(self,name):
+        if name=='PKG-INFO':
+            return open(self.path,'rU').read()
+        raise KeyError("No metadata except PKG-INFO is available")
+
+    def get_metadata_lines(self,name):
+        return yield_lines(self.get_metadata(name))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class PathMetadata(DefaultProvider):
+    """Metadata provider for egg directories
+
+    Usage::
+
+        # Development eggs:
+
+        egg_info = "/path/to/PackageName.egg-info"
+        base_dir = os.path.dirname(egg_info)
+        metadata = PathMetadata(base_dir, egg_info)
+        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir,project_name=dist_name,metadata=metadata)
+
+        # Unpacked egg directories:
+
+        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+        dist = Distribution.from_filename(egg_path, metadata=metadata)
+    """
+
+    def __init__(self, path, egg_info):
+        self.module_path = path
+        self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+    """Metadata provider for .egg files"""
+
+    def __init__(self, importer):
+        """Create a metadata provider from a zipimporter"""
+
+        self.zipinfo = zipimport._zip_directory_cache[importer.archive]
+        self.zip_pre = importer.archive+os.sep
+        self.loader = importer
+        if importer.prefix:
+            self.module_path = os.path.join(importer.archive, importer.prefix)
+        else:
+            self.module_path = importer.archive
+        self._setup_prefix()
+
+
+class ImpWrapper:
+    """PEP 302 Importer that wraps Python's "normal" import algorithm"""
+
+    def __init__(self, path=None):
+        self.path = path
+
+    def find_module(self, fullname, path=None):
+        subname = fullname.split(".")[-1]
+        if subname != fullname and self.path is None:
+            return None
+        if self.path is None:
+            path = None
+        else:
+            path = [self.path]
+        try:
+            file, filename, etc = imp.find_module(subname, path)
+        except ImportError:
+            return None
+        return ImpLoader(file, filename, etc)
+
+
+class ImpLoader:
+    """PEP 302 Loader that wraps Python's "normal" import algorithm"""
+
+    def __init__(self, file, filename, etc):
+        self.file = file
+        self.filename = filename
+        self.etc = etc
+
+    def load_module(self, fullname):
+        try:
+            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+        finally:
+            if self.file: self.file.close()
+        # Note: we don't set __loader__ because we want the module to look
+        # normal; i.e. this is just a wrapper for standard import machinery
+        return mod
+
+
+
+
+def get_importer(path_item):
+    """Retrieve a PEP 302 "importer" for the given path item
+
+    If there is no importer, this returns a wrapper around the builtin import
+    machinery. The returned importer is only cached if it was created by a
+    path hook.
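
For illustration, what ``get_importer()`` falls back to for an ordinary
directory (a minimal sketch)::

    from pkg_resources import get_importer, ImpWrapper

    importer = get_importer('.')              # no path hook claims a plain directory,
    assert isinstance(importer, ImpWrapper)   # so the builtin-import wrapper is used
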
+ """ + try: + importer = sys.path_importer_cache[path_item] + except KeyError: + for hook in sys.path_hooks: + try: + importer = hook(path_item) + except ImportError: + pass + else: + break + else: + importer = None + + sys.path_importer_cache.setdefault(path_item,importer) + if importer is None: + try: + importer = ImpWrapper(path_item) + except ImportError: + pass + return importer + + + + + + + + + + + + + + +_declare_state('dict', _distribution_finders = {}) + +def register_finder(importer_type, distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + +def find_in_zip(importer, path_item, only=False): + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + return # don't yield nested distros + for subitem in metadata.resource_listdir('/'): + if subitem.endswith('.egg'): + subpath = os.path.join(path_item, subitem) + for dist in find_in_zip(zipimport.zipimporter(subpath), subpath): + yield dist + +register_finder(zipimport.zipimporter, find_in_zip) + +def StringIO(*args, **kw): + """Thunk to load the real StringIO on demand""" + global StringIO + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + return StringIO(*args,**kw) + +def find_nothing(importer, path_item, only=False): + return () +register_finder(object,find_nothing) + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if os.path.isdir(path_item) and os.access(path_item, os.R_OK): + if path_item.lower().endswith('.egg'): + # unpacked egg + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item,'EGG-INFO') + ) + ) + else: + # scan for .egg and .egg-info in directory + for entry in os.listdir(path_item): + lower = entry.lower() + if lower.endswith('.egg-info'): + fullpath = os.path.join(path_item, entry) + if os.path.isdir(fullpath): + # egg-info directory, allow getting metadata + metadata = PathMetadata(path_item, fullpath) + else: + metadata = FileMetadata(fullpath) + yield Distribution.from_location( + path_item,entry,metadata,precedence=DEVELOP_DIST + ) + elif not only and lower.endswith('.egg'): + for dist in find_distributions(os.path.join(path_item, entry)): + yield dist + elif not only and lower.endswith('.egg-link'): + for line in file(os.path.join(path_item, entry)): + if not line.strip(): continue + for item in find_distributions(os.path.join(path_item,line.rstrip())): + yield item + break +register_finder(ImpWrapper, find_on_path) + +_declare_state('dict', _namespace_handlers = {}) +_declare_state('dict', _namespace_packages = {}) + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` 
is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer,path_entry,moduleName,module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. + """ + _namespace_handlers[importer_type] = namespace_handler + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + importer = get_importer(path_item) + if importer is None: + return None + loader = importer.find_module(packageName) + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = imp.new_module(packageName) + module.__path__ = []; _set_parent_ns(packageName) + elif not hasattr(module,'__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer,path_item,packageName,module) + if subpath is not None: + path = module.__path__; path.append(subpath) + loader.load_module(packageName); module.__path__ = path + return subpath + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path, parent = sys.path, None + if '.' in packageName: + parent = '.'.join(packageName.split('.')[:-1]) + declare_namespace(parent) + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError: + raise TypeError("Not a package:", parent) + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent,[]).append(packageName) + _namespace_packages.setdefault(packageName,[]) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + imp.release_lock() + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + imp.acquire_lock() + try: + for package in _namespace_packages.get(parent,()): + subpath = _handle_ns(package, path_item) + if subpath: fixup_namespace_packages(subpath,package) + finally: + imp.release_lock() + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = _normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item)==normalized: + break + else: + # Only return the path if it's not already there + return subpath + +register_namespace_handler(ImpWrapper,file_ns_handler) +register_namespace_handler(zipimport.zipimporter,file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + +register_namespace_handler(object,null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(filename)) + +def _normalize_cached(filename,_cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = 
result = normalize_path(filename)
+        return result
+
+def _set_parent_ns(packageName):
+    parts = packageName.split('.')
+    name = parts.pop()
+    if parts:
+        parent = '.'.join(parts)
+        setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
+    if isinstance(strs,basestring):
+        for s in strs.splitlines():
+            s = s.strip()
+            if s and not s.startswith('#'):     # skip blank lines/comments
+                yield s
+    else:
+        for ss in strs:
+            for s in yield_lines(ss):
+                yield s
+
+LINE_END = re.compile(r"\s*(#.*)?$").match         # whitespace and comment
+CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match    # line continuation
+DISTRO   = re.compile(r"\s*((\w|[-.])+)").match    # Distribution or extra
+VERSION  = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match  # ver. info
+COMMA    = re.compile(r"\s*,").match               # comma between items
+OBRACKET = re.compile(r"\s*\[").match
+CBRACKET = re.compile(r"\s*\]").match
+MODULE   = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+    r"(?P<name>[^-]+)"
+    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
+    re.VERBOSE | re.IGNORECASE
+).match
+
+component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
+replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
+
+def _parse_version_parts(s):
+    for part in component_re.split(s):
+        part = replace(part,part)
+        if not part or part=='.':
+            continue
+        if part[:1] in '0123456789':
+            yield part.zfill(8)    # pad for numeric comparison
+        else:
+            yield '*'+part
+
+    yield '*final'  # ensure that alpha/beta/candidate are before final
+
+def parse_version(s):
+    """Convert a version string to a chronologically-sortable key
+
+    This is a rough cross between distutils' StrictVersion and LooseVersion;
+    if you give it versions that would work with StrictVersion, then it behaves
+    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
+    *possible* to create pathological version coding schemes that will fool
+    this parser, but they should be very rare in practice.
+
+    The returned value will be a tuple of strings. Numeric portions of the
+    version are padded to 8 digits so they will compare numerically, but
+    without relying on how numbers compare relative to strings. Dots are
+    dropped, but dashes are retained. Trailing zeros between alpha segments
+    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
+    "2.4". Alphanumeric parts are lower-cased.
+
+    The algorithm assumes that strings like "-" and any alpha string that
+    alphabetically follows "final" represent a "patch level". So, "2.4-1"
+    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
+    considered newer than "2.4-1", which in turn is newer than "2.4".
+
+    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
+    come before "final" alphabetically) are assumed to be pre-release versions,
+    so that the version "2.4" is considered newer than "2.4a1".
+
+    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
+    "rc" are treated as if they were "c", i.e. as though they were release
+    candidates, and therefore are not as new as a version string that does not
+    contain them, and "dev" is replaced with an '@' so that it sorts lower
+    than any other pre-release tag.
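
For illustration, the ordering rules just described, checked with
``parse_version()``::

    from pkg_resources import parse_version

    assert parse_version('2.4') == parse_version('2.4.0')      # trailing zero dropped
    assert parse_version('2.4a1') < parse_version('2.4')       # pre-release sorts earlier
    assert parse_version('2.4') < parse_version('2.4-1')       # "-1" is a patch level
    assert parse_version('2.4-1') < parse_version('2.4.1')
    assert parse_version('2.4.dev3') < parse_version('2.4a1')  # "dev" sorts lowest
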
+ """ + parts = [] + for part in _parse_version_parts(s.lower()): + if part.startswith('*'): + if part<'*final': # remove '-' before a prerelease tag + while parts and parts[-1]=='*final-': parts.pop() + # remove trailing zeros from each series of numeric parts + while parts and parts[-1]=='00000000': + parts.pop() + parts.append(part) + return tuple(parts) + +class EntryPoint(object): + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, env=None, installer=None): + if require: self.require(env, installer) + entry = __import__(self.module_name, globals(),globals(), ['__name__']) + for attr in self.attrs: + try: + entry = getattr(entry,attr) + except AttributeError: + raise ImportError("%r has no %r attribute" % (entry,attr)) + return entry + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + map(working_set.add, + working_set.resolve(self.dist.requires(self.extras),env,installer)) + + + + #@classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1,extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + try: + attrs = extras = () + name,value = src.split('=',1) + if '[' in value: + value,extras = value.split('[',1) + req = Requirement.parse("x["+extras) + if req.specs: raise ValueError + extras = req.extras + if ':' in value: + value,attrs = value.split(':',1) + if not MODULE(attrs.rstrip()): + raise ValueError + attrs = attrs.rstrip().split('.') + except ValueError: + raise ValueError( + "EntryPoint must be in 'name=module:attrs [extras]' format", + src + ) + else: + return cls(name.strip(), value.strip(), attrs, extras, dist) + + parse = classmethod(parse) + + + + + + + + + #@classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name]=ep + return this + + parse_group = classmethod(parse_group) + + #@classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data,dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + parse_map = classmethod(parse_map) + + + + + + +class Distribution(object): + """Wrap an actual or potential sys.path entry 
w/metadata""" + def __init__(self, + location=None, metadata=None, project_name=None, version=None, + py_version=PY_MAJOR, platform=None, precedence = EGG_DIST + ): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + #@classmethod + def from_location(cls,location,basename,metadata=None,**kw): + project_name, version, py_version, platform = [None]*4 + basename, ext = os.path.splitext(basename) + if ext.lower() in (".egg",".egg-info"): + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name','ver','pyver','plat' + ) + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + ) + from_location = classmethod(from_location) + + hashcmp = property( + lambda self: ( + getattr(self,'parsed_version',()), self.precedence, self.key, + -len(self.location or ''), self.location, self.py_version, + self.platform + ) + ) + def __cmp__(self, other): return cmp(self.hashcmp, other) + def __hash__(self): return hash(self.hashcmp) + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. (i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + #@property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + key = property(key) + + #@property + def parsed_version(self): + try: + return self._parsed_version + except AttributeError: + self._parsed_version = pv = parse_version(self.version) + return pv + + parsed_version = property(parsed_version) + + #@property + def version(self): + try: + return self._version + except AttributeError: + for line in self._get_metadata('PKG-INFO'): + if line.lower().startswith('version:'): + self._version = safe_version(line.split(':',1)[1].strip()) + return self._version + else: + raise ValueError( + "Missing 'Version:' header and/or PKG-INFO file", self + ) + version = property(version) + + + + + #@property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + dm = self.__dep_map = {None: []} + for name in 'requires.txt', 'depends.txt': + for extra,reqs in split_sections(self._get_metadata(name)): + if extra: extra = safe_extra(extra) + dm.setdefault(extra,[]).extend(parse_requirements(reqs)) + return dm + _dep_map = property(_dep_map) + + def requires(self,extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None,())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) + return deps + + def _get_metadata(self,name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def activate(self,path=None): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: path = sys.path + self.insert_on(path) + if path is sys.path: + fixup_namespace_packages(self.location) + for pkg in self._get_metadata('namespace_packages.txt'): + if pkg in sys.modules: declare_namespace(pkg) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + 
filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or PY_MAJOR + ) + + if self.platform: + filename += '-'+self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self,self.location) + else: + return str(self) + + def __str__(self): + try: version = getattr(self,'version',None) + except ValueError: version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name,version) + + def __getattr__(self,attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError,attr + return getattr(self._provider, attr) + + #@classmethod + def from_filename(cls,filename,metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + from_filename = classmethod(from_filename) + + def as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + return Requirement.parse('%s==%s' % (self.project_name, self.version)) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group,name) + if ep is None: + raise ImportError("Entry point %r not found" % ((group,name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group,{}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + + + + + + + + + + + + + + + + + + + def insert_on(self, path, loc = None): + """Insert self.location in path before its nearest parent directory""" + + loc = loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath= [(p and _normalize_cached(p) or p) for p in path] + + bp = None + for p, item in enumerate(npath): + if item==nloc: + break + elif item==bdir and self.precedence==EGG_DIST: + # if it's an .egg, give it precedence over its directory + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while 1: + try: + np = npath.index(nloc, p+1) + except ValueError: + break + else: + del npath[np], path[np] + p = np # ha! 
+ + return + + + def check_version_conflict(self): + if self.key=='setuptools': + return # ignore the inevitable setuptools self-conflicts :( + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages + ): + continue + + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or fn.startswith(loc)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for "+repr(self)) + return False + return True + + def clone(self,**kw): + """Copy this distribution, substituting in any changed keyword args""" + for attr in ( + 'project_name', 'version', 'py_version', 'platform', 'location', + 'precedence' + ): + kw.setdefault(attr, getattr(self,attr,None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + + + + #@property + def extras(self): + return [dep for dep in self._dep_map if dep] + extras = property(extras) + + +def issue_warning(*args,**kw): + level = 1 + g = globals() + try: + # find the first stack frame that is *not* code in + # the pkg_resources module, to use for the warning + while sys._getframe(level).f_globals is g: + level += 1 + except ValueError: + pass + from warnings import warn + warn(stacklevel = level+1, *args, **kw) + + + + + + + + + + + + + + + + + + + + + + + +def parse_requirements(strs): + """Yield ``Requirement`` objects for each specification in `strs` + + `strs` must be an instance of ``basestring``, or a (possibly-nested) + iterable thereof. 
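
For illustration, parsing and testing a requirement with these helpers
(``FooProject`` is an arbitrary name)::

    from pkg_resources import Requirement, parse_requirements

    req, = parse_requirements("FooProject[bar]>=1.2,!=1.3")
    req.project_name, req.extras, req.specs
    # -> ('FooProject', ('bar',), [('>=', '1.2'), ('!=', '1.3')])

    assert '1.4' in Requirement.parse("FooProject>=1.2,!=1.3")
    assert '1.3' not in Requirement.parse("FooProject>=1.2,!=1.3")
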
+ """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + def scan_list(ITEM,TERMINATOR,line,p,groups,item_name): + + items = [] + + while not TERMINATOR(line,p): + if CONTINUE(line,p): + try: + line = lines.next(); p = 0 + except StopIteration: + raise ValueError( + "\\ must not appear on the last nonblank line" + ) + + match = ITEM(line,p) + if not match: + raise ValueError("Expected "+item_name+" in",line,"at",line[p:]) + + items.append(match.group(*groups)) + p = match.end() + + match = COMMA(line,p) + if match: + p = match.end() # skip the comma + elif not TERMINATOR(line,p): + raise ValueError( + "Expected ',' or end-of-list in",line,"at",line[p:] + ) + + match = TERMINATOR(line,p) + if match: p = match.end() # skip the terminator, if any + return line, p, items + + for line in lines: + match = DISTRO(line) + if not match: + raise ValueError("Missing distribution spec", line) + project_name = match.group(1) + p = match.end() + extras = [] + + match = OBRACKET(line,p) + if match: + p = match.end() + line, p, extras = scan_list( + DISTRO, CBRACKET, line, p, (1,), "'extra' name" + ) + + line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec") + specs = [(op,safe_version(val)) for op,val in specs] + yield Requirement(project_name, specs, extras) + + +def _sort_dists(dists): + tmp = [(dist.hashcmp,dist) for dist in dists] + tmp.sort() + dists[::-1] = [d for hc,d in tmp] + + + + + + + + + + + + + + + + + +class Requirement: + def __init__(self, project_name, specs, extras): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + self.unsafe_name, project_name = project_name, safe_name(project_name) + self.project_name, self.key = project_name, project_name.lower() + index = [(parse_version(v),state_machine[op],op,v) for op,v in specs] + index.sort() + self.specs = [(op,ver) for parsed,trans,op,ver in index] + self.index, self.extras = index, tuple(map(safe_extra,extras)) + self.hashCmp = ( + self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]), + frozenset(self.extras) + ) + self.__hash = hash(self.hashCmp) + + def __str__(self): + specs = ','.join([''.join(s) for s in self.specs]) + extras = ','.join(self.extras) + if extras: extras = '[%s]' % extras + return '%s%s%s' % (self.project_name, extras, specs) + + def __eq__(self,other): + return isinstance(other,Requirement) and self.hashCmp==other.hashCmp + + def __contains__(self,item): + if isinstance(item,Distribution): + if item.key != self.key: return False + if self.index: item = item.parsed_version # only get if we need it + elif isinstance(item,basestring): + item = parse_version(item) + last = None + for parsed,trans,op,ver in self.index: + action = trans[cmp(item,parsed)] + if action=='F': return False + elif action=='T': return True + elif action=='+': last = True + elif action=='-' or last is None: last = False + if last is None: last = True # no rules encountered + return last + + + def __hash__(self): + return self.__hash + + def __repr__(self): return "Requirement.parse(%r)" % str(self) + + #@staticmethod + def parse(s): + reqs = list(parse_requirements(s)) + if reqs: + if len(reqs)==1: + return reqs[0] + raise ValueError("Expected only one requirement", s) + raise ValueError("No requirements found", s) + + parse = staticmethod(parse) + +state_machine = { + # =>< + '<' : '--T', + '<=': 'T-T', + '>' : 'F+F', + '>=': 'T+F', + '==': 'T..', + '!=': 'F++', +} + + +def _get_mro(cls): + """Get an mro for a type or classic class""" + if not 
isinstance(cls,type): + class cls(cls,object): pass + return cls.__mro__[1:] + return cls.__mro__ + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + for t in _get_mro(getattr(ob, '__class__', type(ob))): + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def split_sections(s): + """Split a string or iterable thereof into (section,content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. + """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + +def _mkstemp(*args,**kw): + from tempfile import mkstemp + old_open = os.open + try: + os.open = os_open # temporarily bypass sandboxing + return mkstemp(*args,**kw) + finally: + os.open = old_open # and then put it back + + +# Set up global resource manager (deliberately not state-saved) +_manager = ResourceManager() +def _initialize(g): + for name in dir(_manager): + if not name.startswith('_'): + g[name] = getattr(_manager, name) +_initialize(globals()) + +# Prepare the master working set and make the ``require()`` API available +__requires__ = None +_declare_state('object', working_set = WorkingSet()) +try: + # Does the main program list any requirements? + from __main__ import __requires__ +except ImportError: + pass # No: just use the default working set based on sys.path +else: + # Yes: ensure the requirements are met, by prefixing sys.path if necessary + try: + working_set.require(__requires__) + except (VersionConflict, DistributionNotFound): # try it without defaults already on sys.path + working_set = WorkingSet([]) # by starting with an empty path + try: + for dist in working_set.resolve( + parse_requirements(__requires__), Environment() + ): + working_set.add(dist) + except DistributionNotFound: + pass + for entry in sys.path: # add any missing entries from sys.path + if entry not in working_set.entries: + working_set.add_entry(entry) + sys.path[:] = working_set.entries # then copy back to sys.path + +require = working_set.require +iter_entry_points = working_set.iter_entry_points +add_activation_listener = working_set.subscribe +run_script = working_set.run_script +run_main = run_script # backward compatibility +# Activate all distributions already on sys.path, and ensure that +# all distributions added to the working set in the future (e.g. by +# calling ``require()``) will get activated as well. 
+add_activation_listener(lambda dist: dist.activate())
+working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
+
diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/__init__.py
--- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/__init__.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/__init__.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,90 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+from setuptools.extension import Extension, Library
+from setuptools.dist import Distribution, Feature, _get_unpatched
+import distutils.core, setuptools.command
+from setuptools.depends import Require
+from distutils.core import Command as _Command
+from distutils.util import convert_path
+import os.path
+import os
+import sys
+
+__version__ = '0.6c16dev4'
+__all__ = [
+    'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
+    'find_packages'
+]
+
+bootstrap_install_from = None
+
+def find_packages(where='.', exclude=()):
+    """Return a list of all Python packages found within directory 'where'
+
+    'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
+    will be converted to the appropriate local path syntax. 'exclude' is a
+    sequence of package names to exclude; '*' can be used as a wildcard in the
+    names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
+    'foo' itself).
+    """
+    out = []
+    stack=[(convert_path(where), '')]
+    while stack:
+        where,prefix = stack.pop(0)
+        for name in os.listdir(where):
+            fn = os.path.join(where,name)
+            if ('.' not in name and os.path.isdir(fn) and
+                os.path.isfile(os.path.join(fn,'__init__.py'))
+            ):
+                out.append(prefix+name); stack.append((fn,prefix+name+'.'))
+    for pat in list(exclude)+['ez_setup']:
+        from fnmatch import fnmatchcase
+        out = [item for item in out if not fnmatchcase(item,pat)]
+    return out
+
+setup = distutils.core.setup
+
+_Command = _get_unpatched(_Command)
+
+class Command(_Command):
+    __doc__ = _Command.__doc__
+
+    command_consumes_arguments = False
+
+    def __init__(self, dist, **kw):
+        # Add support for keyword arguments
+        _Command.__init__(self,dist)
+        for k,v in kw.items():
+            setattr(self,k,v)
+
+    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+        for k,v in kw.items():
+            setattr(cmd,k,v)    # update command with keywords
+        return cmd
+
+import distutils.core
+distutils.core.Command = Command    # we can't patch distutils.cmd, alas
+
+def findall(dir = os.curdir):
+    """Find all files under 'dir' and return the list of full filenames
+    (relative to 'dir').
+    """
+    all_files = []
+    for base, dirs, files in os.walk(dir):
+        if base==os.curdir or base.startswith(os.curdir+os.sep):
+            base = base[2:]
+        if base:
+            files = [os.path.join(base, f) for f in files]
+        all_files.extend(filter(os.path.isfile, files))
+    return all_files
+
+import distutils.filelist
+distutils.filelist.findall = findall    # fix findall bug in distutils.
+
+
+# sys.dont_write_bytecode was introduced in Python 2.6.
+if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or + (not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))): + _dont_write_bytecode = True +else: + _dont_write_bytecode = False diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/archive_util.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/archive_util.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/archive_util.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/archive_util.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,205 @@ +"""Utilities for extracting common archive formats""" + + +__all__ = [ + "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", + "UnrecognizedFormat", "extraction_drivers", "unpack_directory", +] + +import zipfile, tarfile, os, shutil +from pkg_resources import ensure_directory +from distutils.errors import DistutilsError + +class UnrecognizedFormat(DistutilsError): + """Couldn't recognize the archive type""" + +def default_filter(src,dst): + """The default progress/filter callback; returns True for all files""" + return dst + + + + + + + + + + + + + + + + + + + + + + + +def unpack_archive(filename, extract_dir, progress_filter=default_filter, + drivers=None +): + """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` + + `progress_filter` is a function taking two arguments: a source path + internal to the archive ('/'-separated), and a filesystem path where it + will be extracted. The callback must return the desired extract path + (which may be the same as the one passed in), or else ``None`` to skip + that file or directory. The callback can thus be used to report on the + progress of the extraction, as well as to filter the items extracted or + alter their extraction paths. + + `drivers`, if supplied, must be a non-empty sequence of functions with the + same signature as this function (minus the `drivers` argument), that raise + ``UnrecognizedFormat`` if they do not support extracting the designated + archive type. The `drivers` are tried in sequence until one is found that + does not raise an error, or until all are exhausted (in which case + ``UnrecognizedFormat`` is raised). If you do not supply a sequence of + drivers, the module's ``extraction_drivers`` constant will be used, which + means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that + order. 
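
For illustration, a progress/filter callback wired into ``unpack_archive()``
(the archive name and target directory are hypothetical)::

    from setuptools.archive_util import unpack_archive

    def report(src, dst):
        print "extracting", src
        return dst              # return None instead to skip this entry

    unpack_archive('Example-1.0.tar.gz', 'build/Example', progress_filter=report)
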
+ """ + for driver in drivers or extraction_drivers: + try: + driver(filename, extract_dir, progress_filter) + except UnrecognizedFormat: + continue + else: + return + else: + raise UnrecognizedFormat( + "Not a recognized archive type: %s" % filename + ) + + + + + + + +def unpack_directory(filename, extract_dir, progress_filter=default_filter): + """"Unpack" a directory, using the same interface as for archives + + Raises ``UnrecognizedFormat`` if `filename` is not a directory + """ + if not os.path.isdir(filename): + raise UnrecognizedFormat("%s is not a directory" % (filename,)) + + paths = {filename:('',extract_dir)} + for base, dirs, files in os.walk(filename): + src,dst = paths[base] + for d in dirs: + paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d) + for f in files: + name = src+f + target = os.path.join(dst,f) + target = progress_filter(src+f, target) + if not target: + continue # skip non-files + ensure_directory(target) + f = os.path.join(base,f) + shutil.copyfile(f, target) + shutil.copystat(f, target) + + + + + + + + + + + + + + + + + + +def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): + """Unpack zip `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined + by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + + if not zipfile.is_zipfile(filename): + raise UnrecognizedFormat("%s is not a zip file" % (filename,)) + + z = zipfile.ZipFile(filename) + try: + for info in z.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + target = progress_filter(name, target) + if not target: + continue + if name.endswith('/'): + # directory + ensure_directory(target) + else: + # file + ensure_directory(target) + data = z.read(info.filename) + f = open(target,'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + z.close() + + +def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined + by ``tarfile.open()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise UnrecognizedFormat( + "%s is not a compressed or uncompressed tar file" % (filename,) + ) + + try: + tarobj.chown = lambda *args: None # don't do any chowning! + for member in tarobj: + if member.isfile() or member.isdir(): + name = member.name + # don't extract absolute paths or ones with .. in them + if not name.startswith('/') and '..' 
not in name: + dst = os.path.join(extract_dir, *name.split('/')) + dst = progress_filter(name, dst) + if dst: + if dst.endswith(os.sep): + dst = dst[:-1] + try: + tarobj._extract_member(member,dst) # XXX Ugh + except tarfile.ExtractError: + pass # chown/chmod/mkfifo/mknode/makedev failed + return True + finally: + tarobj.close() + + + + +extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/__init__.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/__init__.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,20 @@ +__all__ = [ + 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', + 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', + 'sdist', 'setopt', 'test', 'upload', 'install_egg_info', 'install_scripts', + 'register', 'bdist_wininst', 'scriptsetup', +] + +import sys +if sys.version>='2.5': + # In Python 2.5 and above, distutils includes its own upload command + __all__.remove('upload') + + +from distutils.command.bdist import bdist + +if 'egg' not in bdist.format_commands: + bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") + bdist.format_commands.append('egg') + +del bdist, sys diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/alias.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/alias.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/alias.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/alias.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,79 @@ +import distutils, os +from setuptools import Command +from distutils.util import convert_path +from distutils import log +from distutils.errors import * +from setuptools.command.setopt import edit_config, option_base, config_file + +def shquote(arg): + """Quote an argument for later parsing by shlex.split()""" + for c in '"', "'", "\\", "#": + if c in arg: return repr(arg) + if arg.split()!=[arg]: + return repr(arg) + return arg + + +class alias(option_base): + """Define a shortcut that invokes one or more commands""" + + description = "define a shortcut to invoke one or more commands" + command_consumes_arguments = True + + user_options = [ + ('remove', 'r', 'remove (unset) the alias'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.args = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.remove and len(self.args)!=1: + raise DistutilsOptionError( + "Must specify exactly one argument (the alias name) when " + "using --remove" + ) + + def run(self): + aliases = self.distribution.get_option_dict('aliases') + + if not self.args: + print "Command Aliases" + print "---------------" + for alias in aliases: + print "setup.py alias", format_alias(alias, aliases) + return + + elif len(self.args)==1: + alias, = self.args + if self.remove: + command = None + elif alias in aliases: + print "setup.py alias", format_alias(alias, aliases) + return + else: + print "No alias definition found for %r" % alias + return + else: + alias = self.args[0] + command = ' '.join(map(shquote,self.args[1:])) + + 
edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run) + + +def format_alias(name, aliases): + source, command = aliases[name] + if source == config_file('global'): + source = '--global-config ' + elif source == config_file('user'): + source = '--user-config ' + elif source == config_file('local'): + source = '' + else: + source = '--filename=%r' % source + return source+name+' '+command diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_egg.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_egg.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_egg.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_egg.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,533 @@ +"""setuptools.command.bdist_egg + +Build .egg distributions""" + +# This module should be kept compatible with Python 2.3 +import sys, os, marshal +from setuptools import Command +from distutils.dir_util import remove_tree, mkpath +from distutils.sysconfig import get_python_version, get_python_lib +from distutils import log +from distutils.errors import DistutilsSetupError +from pkg_resources import get_build_platform, Distribution, ensure_directory +from pkg_resources import EntryPoint +from types import CodeType +from setuptools.extension import Library + +def strip_module(filename): + if '.' in filename: + filename = os.path.splitext(filename)[0] + if filename.endswith('module'): + filename = filename[:-6] + return filename + +def write_stub(resource, pyfile): + f = open(pyfile,'w') + f.write('\n'.join([ + "def __bootstrap__():", + " global __bootstrap__, __loader__, __file__", + " import sys, pkg_resources, imp", + " __file__ = pkg_resources.resource_filename(__name__,%r)" + % resource, + " __loader__ = None; del __bootstrap__, __loader__", + " imp.load_dynamic(__name__,__file__)", + "__bootstrap__()", + "" # terminal \n + ])) + f.close() + +# stub __init__.py for packages distributed without one +NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)' + +class bdist_egg(Command): + + description = "create an \"egg\" distribution" + + user_options = [ + ('bdist-dir=', 'b', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_build_platform()), + ('exclude-source-files', None, + "remove all .py files from the generated egg"), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ] + + boolean_options = [ + 'keep-temp', 'skip-build', 'exclude-source-files' + ] + + + + + + + + + + + + + + + + + + def initialize_options (self): + self.bdist_dir = None + self.plat_name = None + self.keep_temp = 0 + self.dist_dir = None + self.skip_build = 0 + self.egg_output = None + self.exclude_source_files = None + + + def finalize_options(self): + ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") + self.egg_info = ei_cmd.egg_info + + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'egg') + + if self.plat_name is None: + self.plat_name = get_build_platform() + + self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) + + if self.egg_output is None: + + # Compute filename of 
the output egg + basename = Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version, + get_python_version(), + self.distribution.has_ext_modules() and self.plat_name + ).egg_name() + + self.egg_output = os.path.join(self.dist_dir, basename+'.egg') + + + + + + + + + def do_install_data(self): + # Hack for packages that install data to install's --install-lib + self.get_finalized_command('install').install_lib = self.bdist_dir + + site_packages = os.path.normcase(os.path.realpath(get_python_lib())) + old, self.distribution.data_files = self.distribution.data_files,[] + + for item in old: + if isinstance(item,tuple) and len(item)==2: + if os.path.isabs(item[0]): + realpath = os.path.realpath(item[0]) + normalized = os.path.normcase(realpath) + if normalized==site_packages or normalized.startswith( + site_packages+os.sep + ): + item = realpath[len(site_packages)+1:], item[1] + # XXX else: raise ??? + self.distribution.data_files.append(item) + + try: + log.info("installing package data to %s" % self.bdist_dir) + self.call_command('install_data', force=0, root=None) + finally: + self.distribution.data_files = old + + + def get_outputs(self): + return [self.egg_output] + + + def call_command(self,cmdname,**kw): + """Invoke reinitialized command `cmdname` with keyword args""" + for dirname in INSTALL_DIRECTORY_ATTRS: + kw.setdefault(dirname,self.bdist_dir) + kw.setdefault('skip_build',self.skip_build) + kw.setdefault('dry_run', self.dry_run) + cmd = self.reinitialize_command(cmdname, **kw) + self.run_command(cmdname) + return cmd + + + def run(self): + # Generate metadata first + self.run_command("egg_info") + # We run install_lib before install_data, because some data hacks + # pull their data path from the install_lib command. + log.info("installing library code to %s" % self.bdist_dir) + instcmd = self.get_finalized_command('install') + old_root = instcmd.root; instcmd.root = None + if self.distribution.has_c_libraries() and not self.skip_build: + self.run_command('build_clib') + cmd = self.call_command('install_lib', warn_dir=0) + instcmd.root = old_root + + all_outputs, ext_outputs = self.get_ext_outputs() + self.stubs = [] + to_compile = [] + for (p,ext_name) in enumerate(ext_outputs): + filename,ext = os.path.splitext(ext_name) + pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py') + self.stubs.append(pyfile) + log.info("creating stub loader for %s" % ext_name) + if not self.dry_run: + write_stub(os.path.basename(ext_name), pyfile) + to_compile.append(pyfile) + ext_outputs[p] = ext_name.replace(os.sep,'/') + + to_compile.extend(self.make_init_files()) + if to_compile: + cmd.byte_compile(to_compile) + if self.distribution.data_files: + self.do_install_data() + + # Make the EGG-INFO directory + archive_root = self.bdist_dir + egg_info = os.path.join(archive_root,'EGG-INFO') + self.mkpath(egg_info) + if self.distribution.scripts: + script_dir = os.path.join(egg_info, 'scripts') + log.info("installing scripts to %s" % script_dir) + self.call_command('install_scripts',install_dir=script_dir,no_ep=1) + + self.copy_metadata_to(egg_info) + native_libs = os.path.join(egg_info, "native_libs.txt") + if all_outputs: + log.info("writing %s" % native_libs) + if not self.dry_run: + ensure_directory(native_libs) + libs_file = open(native_libs, 'wt') + libs_file.write('\n'.join(all_outputs)) + libs_file.write('\n') + libs_file.close() + elif os.path.isfile(native_libs): + log.info("removing %s" % native_libs) + if not self.dry_run: + os.unlink(native_libs) + + write_safety_flag( + 
os.path.join(archive_root,'EGG-INFO'), self.zip_safe() + ) + + if os.path.exists(os.path.join(self.egg_info,'depends.txt')): + log.warn( + "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." + ) + + if self.exclude_source_files: + self.zap_pyfiles() + + # Make the archive + make_zipfile(self.egg_output, archive_root, verbose=self.verbose, + dry_run=self.dry_run, mode=self.gen_header()) + if not self.keep_temp: + remove_tree(self.bdist_dir, dry_run=self.dry_run) + + # Add to 'Distribution.dist_files' so that the "upload" command works + getattr(self.distribution,'dist_files',[]).append( + ('bdist_egg',get_python_version(),self.egg_output)) + + + + + def zap_pyfiles(self): + log.info("Removing .py files from temporary directory") + for base,dirs,files in walk_egg(self.bdist_dir): + for name in files: + if name.endswith('.py'): + path = os.path.join(base,name) + log.debug("Deleting %s", path) + os.unlink(path) + + def zip_safe(self): + safe = getattr(self.distribution,'zip_safe',None) + if safe is not None: + return safe + log.warn("zip_safe flag not set; analyzing archive contents...") + return analyze_egg(self.bdist_dir, self.stubs) + + def make_init_files(self): + """Create missing package __init__ files""" + init_files = [] + for base,dirs,files in walk_egg(self.bdist_dir): + if base==self.bdist_dir: + # don't put an __init__ in the root + continue + for name in files: + if name.endswith('.py'): + if '__init__.py' not in files: + pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.') + if self.distribution.has_contents_for(pkg): + log.warn("Creating missing __init__.py for %s",pkg) + filename = os.path.join(base,'__init__.py') + if not self.dry_run: + f = open(filename,'w'); f.write(NS_PKG_STUB) + f.close() + init_files.append(filename) + break + else: + # not a package, don't traverse to subdirectories + dirs[:] = [] + + return init_files + + def gen_header(self): + epm = EntryPoint.parse_map(self.distribution.entry_points or '') + ep = epm.get('setuptools.installation',{}).get('eggsecutable') + if ep is None: + return 'w' # not an eggsecutable, do it the usual way. 
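+        # Editorial note, not part of the original patch: an egg is
+        # "eggsecutable" when setup.py declares an entry point in the
+        # 'setuptools.installation' group under the name 'eggsecutable',
+        # e.g. ('mypkg' is a hypothetical project):
+        #
+        #     setup(
+        #         name='mypkg',
+        #         entry_points={
+        #             'setuptools.installation': [
+        #                 'eggsecutable = mypkg.main:run',
+        #             ],
+        #         },
+        #     )
+        #
+        # When such an entry point exists, this method writes a /bin/sh
+        # header that self-execs the egg, and returns 'a' so that the zip
+        # payload is appended after the header.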
+ + if not ep.attrs or ep.extras: + raise DistutilsSetupError( + "eggsecutable entry point (%r) cannot have 'extras' " + "or refer to a module" % (ep,) + ) + + pyver = sys.version[:3] + pkg = ep.module_name + full = '.'.join(ep.attrs) + base = ep.attrs[0] + basename = os.path.basename(self.egg_output) + + header = ( + "#!/bin/sh\n" + 'if [ `basename $0` = "%(basename)s" ]\n' + 'then exec python%(pyver)s -c "' + "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " + "from %(pkg)s import %(base)s; sys.exit(%(full)s())" + '" "$@"\n' + 'else\n' + ' echo $0 is not the correct name for this egg file.\n' + ' echo Please rename it back to %(basename)s and try again.\n' + ' exec false\n' + 'fi\n' + + ) % locals() + + if not self.dry_run: + mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) + f = open(self.egg_output, 'w') + f.write(header) + f.close() + return 'a' + + + def copy_metadata_to(self, target_dir): + prefix = os.path.join(self.egg_info,'') + for path in self.ei_cmd.filelist.files: + if path.startswith(prefix): + target = os.path.join(target_dir, path[len(prefix):]) + ensure_directory(target) + self.copy_file(path, target) + + def get_ext_outputs(self): + """Get a list of relative paths to C extensions in the output distro""" + + all_outputs = [] + ext_outputs = [] + + paths = {self.bdist_dir:''} + for base, dirs, files in os.walk(self.bdist_dir): + for filename in files: + if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: + all_outputs.append(paths[base]+filename) + for filename in dirs: + paths[os.path.join(base,filename)] = paths[base]+filename+'/' + + if self.distribution.has_ext_modules(): + build_cmd = self.get_finalized_command('build_ext') + for ext in build_cmd.extensions: + if isinstance(ext,Library): + continue + fullname = build_cmd.get_ext_fullname(ext.name) + filename = build_cmd.get_ext_filename(fullname) + if not os.path.basename(filename).startswith('dl-'): + if os.path.exists(os.path.join(self.bdist_dir,filename)): + ext_outputs.append(filename) + + return all_outputs, ext_outputs + + +NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) + + + + +def walk_egg(egg_dir): + """Walk an unpacked egg's contents, skipping the metadata directory""" + walker = os.walk(egg_dir) + base,dirs,files = walker.next() + if 'EGG-INFO' in dirs: + dirs.remove('EGG-INFO') + yield base,dirs,files + for bdf in walker: + yield bdf + +def analyze_egg(egg_dir, stubs): + # check for existing flag in EGG-INFO + for flag,fn in safety_flags.items(): + if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)): + return flag + if not can_scan(): return False + safe = True + for base, dirs, files in walk_egg(egg_dir): + for name in files: + if name.endswith('.py') or name.endswith('.pyw'): + continue + elif name.endswith('.pyc') or name.endswith('.pyo'): + # always scan, even if we already know we're not safe + safe = scan_module(egg_dir, base, name, stubs) and safe + return safe + +def write_safety_flag(egg_dir, safe): + # Write or remove zip safety flag file(s) + for flag,fn in safety_flags.items(): + fn = os.path.join(egg_dir, fn) + if os.path.exists(fn): + if safe is None or bool(safe)!=flag: + os.unlink(fn) + elif safe is not None and bool(safe)==flag: + f=open(fn,'wb'); f.write('\n'); f.close() + +safety_flags = { + True: 'zip-safe', + False: 'not-zip-safe', +} + +def scan_module(egg_dir, base, name, stubs): + """Check whether module possibly uses unsafe-for-zipfile stuff""" + + filename = os.path.join(base,name) + if filename[:-1] in stubs: + return 
True # Extension module
+    pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
+    module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
+    f = open(filename,'rb'); f.read(8) # skip magic & date
+    code = marshal.load(f); f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+            'getinnerframes', 'getouterframes', 'stack', 'trace'
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
+        if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
+            log.warn("%s: top-level module may be 'python -m' script", module)
+            safe = False
+    return safe
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names: yield name
+    for const in code.co_consts:
+        if isinstance(const,basestring):
+            yield const
+        elif isinstance(const,CodeType):
+            for name in iter_symbols(const):
+                yield name
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn("Please ask the author to include a 'zip_safe'"
+             " setting (either True or False) in the package's setup.py")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+    'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
+    mode='w'
+):
+    """Create a zip file from all the files under 'base_dir'. The output
+    zip file is written to 'zip_filename' (the 'base_dir' argument only
+    determines which files are archived). This version always uses the
+    "zipfile" Python module; it does not fall back to the InfoZIP "zip"
+    utility. Returns the name of the output zip file.
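+
+    For illustration only (editorial sketch, not part of the original
+    patch), the bdist_egg command above calls this roughly as::
+
+        make_zipfile('dist/mypkg-1.0-py2.7.egg', archive_root,
+                     verbose=0, dry_run=0, mode='w')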
+ """ + import zipfile + mkpath(os.path.dirname(zip_filename), dry_run=dry_run) + log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) + + def visit(z, dirname, names): + for name in names: + path = os.path.normpath(os.path.join(dirname, name)) + if os.path.isfile(path): + p = path[len(base_dir)+1:] + if not dry_run: + z.write(path, p) + log.debug("adding '%s'" % p) + + if compress is None: + compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits + + compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)] + if not dry_run: + z = zipfile.ZipFile(zip_filename, mode, compression=compression) + os.path.walk(base_dir, visit, z) + z.close() + else: + os.path.walk(base_dir, visit, None) + return zip_filename +# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_rpm.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_rpm.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_rpm.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_rpm.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,82 @@ +# This is just a kludge so that bdist_rpm doesn't guess wrong about the +# distribution name and version, if the egg_info command is going to alter +# them, another kludge to allow you to build old-style non-egg RPMs, and +# finally, a kludge to track .rpm files for uploading when run on Python <2.5. + +from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm +import sys, os + +class bdist_rpm(_bdist_rpm): + + def initialize_options(self): + _bdist_rpm.initialize_options(self) + self.no_egg = None + + if sys.version<"2.5": + # Track for uploading any .rpm file(s) moved to self.dist_dir + def move_file(self, src, dst, level=1): + _bdist_rpm.move_file(self, src, dst, level) + if dst==self.dist_dir and src.endswith('.rpm'): + getattr(self.distribution,'dist_files',[]).append( + ('bdist_rpm', + src.endswith('.src.rpm') and 'any' or sys.version[:3], + os.path.join(dst, os.path.basename(src))) + ) + + def run(self): + self.run_command('egg_info') # ensure distro name is up-to-date + _bdist_rpm.run(self) + + + + + + + + + + + + + + def _make_spec_file(self): + version = self.distribution.get_version() + rpmversion = version.replace('-','_') + spec = _bdist_rpm._make_spec_file(self) + line23 = '%define version '+version + line24 = '%define version '+rpmversion + spec = [ + line.replace( + "Source0: %{name}-%{version}.tar", + "Source0: %{name}-%{unmangled_version}.tar" + ).replace( + "setup.py install ", + "setup.py install --single-version-externally-managed " + ).replace( + "%setup", + "%setup -n %{name}-%{unmangled_version}" + ).replace(line23,line24) + for line in spec + ] + spec.insert(spec.index(line24)+1, "%define unmangled_version "+version) + return spec + + + + + + + + + + + + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_wininst.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_wininst.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/bdist_wininst.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/bdist_wininst.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,82 @@ +from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst +import os, sys + +class bdist_wininst(_bdist_wininst): + _good_upload = _bad_upload = None + + def create_exe(self, arcname, fullname, 
bitmap=None): + _bdist_wininst.create_exe(self, arcname, fullname, bitmap) + installer_name = self.get_installer_filename(fullname) + if self.target_version: + pyversion = self.target_version + # fix 2.5+ bdist_wininst ignoring --target-version spec + self._bad_upload = ('bdist_wininst', 'any', installer_name) + else: + pyversion = 'any' + self._good_upload = ('bdist_wininst', pyversion, installer_name) + + def _fix_upload_names(self): + good, bad = self._good_upload, self._bad_upload + dist_files = getattr(self.distribution, 'dist_files', []) + if bad in dist_files: + dist_files.remove(bad) + if good not in dist_files: + dist_files.append(good) + + def reinitialize_command (self, command, reinit_subcommands=0): + cmd = self.distribution.reinitialize_command( + command, reinit_subcommands) + if command in ('install', 'install_lib'): + cmd.install_lib = None # work around distutils bug + return cmd + + def run(self): + self._is_running = True + try: + _bdist_wininst.run(self) + self._fix_upload_names() + finally: + self._is_running = False + + + if not hasattr(_bdist_wininst, 'get_installer_filename'): + def get_installer_filename(self, fullname): + # Factored out to allow overriding in subclasses + if self.target_version: + # if we create an installer for a specific python version, + # it's better to include this in the name + installer_name = os.path.join(self.dist_dir, + "%s.win32-py%s.exe" % + (fullname, self.target_version)) + else: + installer_name = os.path.join(self.dist_dir, + "%s.win32.exe" % fullname) + return installer_name + # get_installer_filename() + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/build_ext.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/build_ext.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/build_ext.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/build_ext.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,285 @@ +from distutils.command.build_ext import build_ext as _du_build_ext +try: + # Attempt to use Pyrex for building extensions, if available + from Pyrex.Distutils.build_ext import build_ext as _build_ext +except ImportError: + _build_ext = _du_build_ext + +import os, sys +from distutils.file_util import copy_file +from setuptools.extension import Library +from distutils.ccompiler import new_compiler +from distutils.sysconfig import customize_compiler, get_config_var +get_config_var("LDSHARED") # make sure _config_vars is initialized +from distutils.sysconfig import _config_vars +from distutils import log +from distutils.errors import * + +have_rtld = False +use_stubs = False +libtype = 'shared' + +if sys.platform == "darwin": + use_stubs = True +elif os.name != 'nt': + try: + from dl import RTLD_NOW + have_rtld = True + use_stubs = True + except ImportError: + pass + +def if_dl(s): + if have_rtld: + return s + return '' + + + + + + +class build_ext(_build_ext): + def run(self): + """Build extensions in build directory, then copy if --inplace""" + old_inplace, self.inplace = self.inplace, 0 + _build_ext.run(self) + self.inplace = old_inplace + if old_inplace: + self.copy_extensions_to_source() + + def copy_extensions_to_source(self): + build_py = self.get_finalized_command('build_py') + for ext in self.extensions: + fullname = self.get_ext_fullname(ext.name) + filename = self.get_ext_filename(fullname) + modpath = fullname.split('.') + package = '.'.join(modpath[:-1]) + 
package_dir = build_py.get_package_dir(package) + dest_filename = os.path.join(package_dir,os.path.basename(filename)) + src_filename = os.path.join(self.build_lib,filename) + + # Always copy, even if source is older than destination, to ensure + # that the right extensions for the current Python/platform are + # used. + copy_file( + src_filename, dest_filename, verbose=self.verbose, + dry_run=self.dry_run + ) + if ext._needs_stub: + self.write_stub(package_dir or os.curdir, ext, True) + + + if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'): + # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4 + def swig_sources(self, sources, *otherargs): + # first do any Pyrex processing + sources = _build_ext.swig_sources(self, sources) or sources + # Then do any actual SWIG stuff on the remainder + return _du_build_ext.swig_sources(self, sources, *otherargs) + + + + def get_ext_filename(self, fullname): + filename = _build_ext.get_ext_filename(self,fullname) + if fullname in self.ext_map: + ext = self.ext_map[fullname] + if isinstance(ext,Library): + fn, ext = os.path.splitext(filename) + return self.shlib_compiler.library_filename(fn,libtype) + elif use_stubs and ext._links_to_dynamic: + d,fn = os.path.split(filename) + return os.path.join(d,'dl-'+fn) + return filename + + def initialize_options(self): + _build_ext.initialize_options(self) + self.shlib_compiler = None + self.shlibs = [] + self.ext_map = {} + + def finalize_options(self): + _build_ext.finalize_options(self) + self.extensions = self.extensions or [] + self.check_extensions_list(self.extensions) + self.shlibs = [ext for ext in self.extensions + if isinstance(ext,Library)] + if self.shlibs: + self.setup_shlib_compiler() + for ext in self.extensions: + ext._full_name = self.get_ext_fullname(ext.name) + for ext in self.extensions: + fullname = ext._full_name + self.ext_map[fullname] = ext + ltd = ext._links_to_dynamic = \ + self.shlibs and self.links_to_dynamic(ext) or False + ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library) + filename = ext._file_name = self.get_ext_filename(fullname) + libdir = os.path.dirname(os.path.join(self.build_lib,filename)) + if ltd and libdir not in ext.library_dirs: + ext.library_dirs.append(libdir) + if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: + ext.runtime_library_dirs.append(os.curdir) + + def setup_shlib_compiler(self): + compiler = self.shlib_compiler = new_compiler( + compiler=self.compiler, dry_run=self.dry_run, force=self.force + ) + if sys.platform == "darwin": + tmp = _config_vars.copy() + try: + # XXX Help! I don't have any idea whether these are right... 
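+                # Editorial note, not part of the original patch: these
+                # overrides make distutils link a Mach-O shared library on
+                # OS X: '-dynamiclib' produces a dylib, '-undefined
+                # dynamic_lookup' defers undefined-symbol resolution to
+                # load time, and 'SO' gives the output a '.dylib' suffix.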
+ _config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup" + _config_vars['CCSHARED'] = " -dynamiclib" + _config_vars['SO'] = ".dylib" + customize_compiler(compiler) + finally: + _config_vars.clear() + _config_vars.update(tmp) + else: + customize_compiler(compiler) + + if self.include_dirs is not None: + compiler.set_include_dirs(self.include_dirs) + if self.define is not None: + # 'define' option is a list of (name,value) tuples + for (name,value) in self.define: + compiler.define_macro(name, value) + if self.undef is not None: + for macro in self.undef: + compiler.undefine_macro(macro) + if self.libraries is not None: + compiler.set_libraries(self.libraries) + if self.library_dirs is not None: + compiler.set_library_dirs(self.library_dirs) + if self.rpath is not None: + compiler.set_runtime_library_dirs(self.rpath) + if self.link_objects is not None: + compiler.set_link_objects(self.link_objects) + + # hack so distutils' build_extension() builds a library instead + compiler.link_shared_object = link_shared_object.__get__(compiler) + + + + def get_export_symbols(self, ext): + if isinstance(ext,Library): + return ext.export_symbols + return _build_ext.get_export_symbols(self,ext) + + def build_extension(self, ext): + _compiler = self.compiler + try: + if isinstance(ext,Library): + self.compiler = self.shlib_compiler + _build_ext.build_extension(self,ext) + if ext._needs_stub: + self.write_stub( + self.get_finalized_command('build_py').build_lib, ext + ) + finally: + self.compiler = _compiler + + def links_to_dynamic(self, ext): + """Return true if 'ext' links to a dynamic lib in the same package""" + # XXX this should check to ensure the lib is actually being built + # XXX as dynamic, and not just using a locally-found version or a + # XXX static-compiled version + libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) + pkg = '.'.join(ext._full_name.split('.')[:-1]+['']) + for libname in ext.libraries: + if pkg+libname in libnames: return True + return False + + def get_outputs(self): + outputs = _build_ext.get_outputs(self) + optimize = self.get_finalized_command('build_py').optimize + for ext in self.extensions: + if ext._needs_stub: + base = os.path.join(self.build_lib, *ext._full_name.split('.')) + outputs.append(base+'.py') + outputs.append(base+'.pyc') + if optimize: + outputs.append(base+'.pyo') + return outputs + + def write_stub(self, output_dir, ext, compile=False): + log.info("writing stub loader for %s to %s",ext._full_name, output_dir) + stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py' + if compile and os.path.exists(stub_file): + raise DistutilsError(stub_file+" already exists! 
Please delete.") + if not self.dry_run: + f = open(stub_file,'w') + f.write('\n'.join([ + "def __bootstrap__():", + " global __bootstrap__, __file__, __loader__", + " import sys, os, pkg_resources, imp"+if_dl(", dl"), + " __file__ = pkg_resources.resource_filename(__name__,%r)" + % os.path.basename(ext._file_name), + " del __bootstrap__", + " if '__loader__' in globals():", + " del __loader__", + if_dl(" old_flags = sys.getdlopenflags()"), + " old_dir = os.getcwd()", + " try:", + " os.chdir(os.path.dirname(__file__))", + if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), + " imp.load_dynamic(__name__,__file__)", + " finally:", + if_dl(" sys.setdlopenflags(old_flags)"), + " os.chdir(old_dir)", + "__bootstrap__()", + "" # terminal \n + ])) + f.close() + if compile: + from distutils.util import byte_compile + byte_compile([stub_file], optimize=0, + force=True, dry_run=self.dry_run) + optimize = self.get_finalized_command('install_lib').optimize + if optimize > 0: + byte_compile([stub_file], optimize=optimize, + force=True, dry_run=self.dry_run) + if os.path.exists(stub_file) and not self.dry_run: + os.unlink(stub_file) + + +if use_stubs or os.name=='nt': + # Build shared libraries + # + def link_shared_object(self, objects, output_libname, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None + ): self.link( + self.SHARED_LIBRARY, objects, output_libname, + output_dir, libraries, library_dirs, runtime_library_dirs, + export_symbols, debug, extra_preargs, extra_postargs, + build_temp, target_lang + ) +else: + # Build static libraries everywhere else + libtype = 'static' + + def link_shared_object(self, objects, output_libname, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None + ): + # XXX we need to either disallow these attrs on Library instances, + # or warn/abort here if set, or something... + #libraries=None, library_dirs=None, runtime_library_dirs=None, + #export_symbols=None, extra_preargs=None, extra_postargs=None, + #build_temp=None + + assert output_dir is None # distutils build_ext doesn't pass this + output_dir,filename = os.path.split(output_libname) + basename, ext = os.path.splitext(filename) + if self.library_filename("x").startswith('lib'): + # strip 'lib' prefix; this is kludgy if some platform uses + # a different prefix + basename = basename[3:] + + self.create_static_lib( + objects, basename, output_dir, debug, target_lang + ) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/build_py.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/build_py.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/build_py.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/build_py.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,211 @@ +import os.path, sys, fnmatch +from distutils.command.build_py import build_py as _build_py +from distutils.util import convert_path +from glob import glob + +class build_py(_build_py): + """Enhanced 'build_py' command that includes data files with packages + + The data files are specified via a 'package_data' argument to 'setup()'. + See 'setuptools.dist.Distribution' for more details. 
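+
+    For illustration only (editorial sketch, not part of the original
+    patch), a setup script selects such data files roughly like this
+    ('mypkg' is a hypothetical package)::
+
+        setup(
+            packages=['mypkg'],
+            package_data={'mypkg': ['data/*.dat', '*.txt']},
+        )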
+ + Also, this version of the 'build_py' command allows you to specify both + 'py_modules' and 'packages' in the same setup operation. + """ + def finalize_options(self): + _build_py.finalize_options(self) + self.package_data = self.distribution.package_data + self.exclude_package_data = self.distribution.exclude_package_data or {} + if 'data_files' in self.__dict__: del self.__dict__['data_files'] + + def run(self): + self.old_run() + if sys.platform == "win32": + from setuptools.command.scriptsetup import do_scriptsetup + do_scriptsetup() + + def old_run(self): + """Build modules, packages, and copy data files to build directory""" + if not self.py_modules and not self.packages: + return + + if self.py_modules: + self.build_modules() + + if self.packages: + self.build_packages() + self.build_package_data() + + # Only compile actual .py files, using our base class' idea of what our + # output files are. + self.byte_compile(_build_py.get_outputs(self, include_bytecode=0)) + + def __getattr__(self,attr): + if attr=='data_files': # lazily compute data files + self.data_files = files = self._get_data_files(); return files + return _build_py.__getattr__(self,attr) + + def _get_data_files(self): + """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" + self.analyze_manifest() + data = [] + for package in self.packages or (): + # Locate package source directory + src_dir = self.get_package_dir(package) + + # Compute package build directory + build_dir = os.path.join(*([self.build_lib] + package.split('.'))) + + # Length of path to strip from found files + plen = len(src_dir)+1 + + # Strip directory from globbed filenames + filenames = [ + file[plen:] for file in self.find_data_files(package, src_dir) + ] + data.append( (package, src_dir, build_dir, filenames) ) + return data + + def find_data_files(self, package, src_dir): + """Return filenames for package's data files in 'src_dir'""" + globs = (self.package_data.get('', []) + + self.package_data.get(package, [])) + files = self.manifest_files.get(package, [])[:] + for pattern in globs: + # Each pattern has to be converted to a platform-specific path + files.extend(glob(os.path.join(src_dir, convert_path(pattern)))) + return self.exclude_data_files(package, src_dir, files) + + def build_package_data(self): + """Copy data files into build directory""" + lastdir = None + for package, src_dir, build_dir, filenames in self.data_files: + for filename in filenames: + target = os.path.join(build_dir, filename) + self.mkpath(os.path.dirname(target)) + self.copy_file(os.path.join(src_dir, filename), target) + + + def analyze_manifest(self): + self.manifest_files = mf = {} + if not self.distribution.include_package_data: + return + src_dirs = {} + for package in self.packages or (): + # Locate package source directory + src_dirs[assert_relative(self.get_package_dir(package))] = package + + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + for path in ei_cmd.filelist.files: + d,f = os.path.split(assert_relative(path)) + prev = None + oldf = f + while d and d!=prev and d not in src_dirs: + prev = d + d, df = os.path.split(d) + f = os.path.join(df, f) + if d in src_dirs: + if path.endswith('.py') and f==oldf: + continue # it's a module, not data + mf.setdefault(src_dirs[d],[]).append(path) + + def get_data_files(self): pass # kludge 2.4 for lazy computation + + if sys.version<"2.4": # Python 2.4 already has this code + def get_outputs(self, include_bytecode=1): + """Return complete list of files copied to the 
build directory + + This includes both '.py' files and data files, as well as '.pyc' + and '.pyo' files if 'include_bytecode' is true. (This method is + needed for the 'install_lib' command to do its job properly, and to + generate a correct installation manifest.) + """ + return _build_py.get_outputs(self, include_bytecode) + [ + os.path.join(build_dir, filename) + for package, src_dir, build_dir,filenames in self.data_files + for filename in filenames + ] + + def check_package(self, package, package_dir): + """Check namespace packages' __init__ for declare_namespace""" + try: + return self.packages_checked[package] + except KeyError: + pass + + init_py = _build_py.check_package(self, package, package_dir) + self.packages_checked[package] = init_py + + if not init_py or not self.distribution.namespace_packages: + return init_py + + for pkg in self.distribution.namespace_packages: + if pkg==package or pkg.startswith(package+'.'): + break + else: + return init_py + + f = open(init_py,'rU') + if 'declare_namespace' not in f.read(): + from distutils.errors import DistutilsError + raise DistutilsError( + "Namespace package problem: %s is a namespace package, but its\n" + "__init__.py does not call declare_namespace()! Please fix it.\n" + '(See the setuptools manual under "Namespace Packages" for ' + "details.)\n" % (package,) + ) + f.close() + return init_py + + def initialize_options(self): + self.packages_checked={} + _build_py.initialize_options(self) + + + + + + + + def exclude_data_files(self, package, src_dir, files): + """Filter filenames for package's data files in 'src_dir'""" + globs = (self.exclude_package_data.get('', []) + + self.exclude_package_data.get(package, [])) + bad = [] + for pattern in globs: + bad.extend( + fnmatch.filter( + files, os.path.join(src_dir, convert_path(pattern)) + ) + ) + bad = dict.fromkeys(bad) + seen = {} + return [ + f for f in files if f not in bad + and f not in seen and seen.setdefault(f,1) # ditch dupes + ] + + +def assert_relative(path): + if not os.path.isabs(path): + return path + from distutils.errors import DistutilsSetupError + raise DistutilsSetupError( +"""Error: setup script specifies an absolute path: + + %s + +setup() arguments must *always* be /-separated paths relative to the +setup.py directory, *never* absolute paths. 
+""" % path + ) + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/develop.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/develop.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/develop.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/develop.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,165 @@ +from setuptools.command.easy_install import easy_install +from distutils.util import convert_path +from pkg_resources import Distribution, PathMetadata, normalize_path +from distutils import log +from distutils.errors import * +import sys, os, setuptools, glob + +class develop(easy_install): + """Set up package for development""" + + description = "install package in 'development mode'" + + user_options = easy_install.user_options + [ + ("uninstall", "u", "Uninstall this source package"), + ("egg-path=", None, "Set the path to be used in the .egg-link file"), + ] + + boolean_options = easy_install.boolean_options + ['uninstall'] + + command_consumes_arguments = False # override base + + def run(self): + self.old_run() + if sys.platform == "win32": + from setuptools.command.scriptsetup import do_scriptsetup + do_scriptsetup() + + def old_run(self): + if self.uninstall: + self.multi_version = True + self.uninstall_link() + else: + self.install_for_development() + self.warn_deprecated_options() + + def initialize_options(self): + self.uninstall = None + self.egg_path = None + easy_install.initialize_options(self) + self.setup_path = None + self.always_copy_from = '.' # always copy eggs installed in curdir + + def finalize_options(self): + ei = self.get_finalized_command("egg_info") + if ei.broken_egg_info: + raise DistutilsError( + "Please rename %r to %r before using 'develop'" + % (ei.egg_info, ei.broken_egg_info) + ) + self.args = [ei.egg_name] + easy_install.finalize_options(self) + # pick up setup-dir .egg files only: no .egg-info + self.package_index.scan(glob.glob('*.egg')) + + self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link') + self.egg_base = ei.egg_base + if self.egg_path is None: + self.egg_path = os.path.abspath(ei.egg_base) + + target = normalize_path(self.egg_base) + if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target: + raise DistutilsOptionError( + "--egg-path must be a relative path from the install" + " directory to "+target + ) + + # Make a distribution for the package's source + self.dist = Distribution( + target, + PathMetadata(target, os.path.abspath(ei.egg_info)), + project_name = ei.egg_name + ) + + p = self.egg_base.replace(os.sep,'/') + if p!= os.curdir: + p = '../' * (p.count('/')+1) + self.setup_path = p + p = normalize_path(os.path.join(self.install_dir, self.egg_path, p)) + if p != normalize_path(os.curdir): + raise DistutilsOptionError( + "Can't get a consistent path to setup script from" + " installation directory", p, normalize_path(os.curdir)) + + def install_for_development(self): + # Ensure metadata is up-to-date + self.run_command('egg_info') + # Build extensions in-place + self.reinitialize_command('build_ext', inplace=1) + self.run_command('build_ext') + self.install_site_py() # ensure that target dir is site-safe + if setuptools.bootstrap_install_from: + self.easy_install(setuptools.bootstrap_install_from) + setuptools.bootstrap_install_from = None + + # create an .egg-link in the installation dir, pointing to our egg + log.info("Creating %s (link to %s)", self.egg_link, 
self.egg_base) + if not self.dry_run: + f = open(self.egg_link,"w") + f.write(self.egg_path + "\n" + self.setup_path) + f.close() + # postprocess the installed distro, fixing up .pth, installing scripts, + # and handling requirements + self.process_distribution(None, self.dist, not self.no_deps) + + + def uninstall_link(self): + if os.path.exists(self.egg_link): + log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) + contents = [line.rstrip() for line in file(self.egg_link)] + if contents not in ([self.egg_path], [self.egg_path, self.setup_path]): + log.warn("Link points to %s: uninstall aborted", contents) + return + if not self.dry_run: + os.unlink(self.egg_link) + if not self.dry_run: + self.update_pth(self.dist) # remove any .pth link to us + if self.distribution.scripts: + # XXX should also check for entry point scripts! + log.warn("Note: you must uninstall or replace scripts manually!") + + + + + + def install_egg_scripts(self, dist): + if dist is not self.dist: + # Installing a dependency, so fall back to normal behavior + return easy_install.install_egg_scripts(self,dist) + + # create wrapper scripts in the script dir, pointing to dist.scripts + + # new-style... + self.install_wrapper_scripts(dist) + + # ...and old-style + for script_name in self.distribution.scripts or []: + script_path = os.path.abspath(convert_path(script_name)) + script_name = os.path.basename(script_path) + f = open(script_path,'rU') + script_text = f.read() + f.close() + self.install_script(dist, script_name, script_text, script_path) + + + + + + + + + + + + + + + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/easy_install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/easy_install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/easy_install.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/easy_install.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,1739 @@ +#!python +"""\ +Easy Install +------------ + +A tool for doing automatic download/extract/build of distutils-based Python +packages. For detailed documentation, see the accompanying EasyInstall.txt +file, or visit the `EasyInstall home page`__. 
+ +__ http://peak.telecommunity.com/DevCenter/EasyInstall +""" +import sys, os.path, zipimport, shutil, tempfile, zipfile, re, stat, random +from glob import glob +from setuptools import Command, _dont_write_bytecode +from setuptools import __version__ as setuptools_version +from setuptools.sandbox import run_setup +from distutils import log, dir_util +from distutils.sysconfig import get_python_lib +from distutils.errors import DistutilsArgError, DistutilsOptionError, \ + DistutilsError +from setuptools.archive_util import unpack_archive +from setuptools.package_index import PackageIndex, parse_bdist_wininst +from setuptools.package_index import URL_SCHEME +from setuptools.command import bdist_egg, egg_info +from pkg_resources import * +sys_executable = os.path.normpath(sys.executable) + +__all__ = [ + 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', + 'main', 'get_exe_prefixes', +] + +def samefile(p1,p2): + if hasattr(os.path,'samefile') and ( + os.path.exists(p1) and os.path.exists(p2) + ): + return os.path.samefile(p1,p2) + return ( + os.path.normpath(os.path.normcase(p1)) == + os.path.normpath(os.path.normcase(p2)) + ) + +class easy_install(Command): + """Manage a download/build/install process""" + description = "Find/get/install Python packages" + command_consumes_arguments = True + + user_options = [ + ('prefix=', None, "installation prefix"), + ("zip-ok", "z", "install package as a zipfile"), + ("multi-version", "m", "make apps have to require() a version"), + ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), + ("install-dir=", "d", "install package to DIR"), + ("script-dir=", "s", "install scripts to DIR"), + ("exclude-scripts", "x", "Don't install scripts"), + ("always-copy", "a", "Copy all needed packages to install dir"), + ("index-url=", "i", "base URL of Python Package Index"), + ("find-links=", "f", "additional URL(s) to search for packages"), + ("delete-conflicting", "D", "no longer needed; don't use this"), + ("ignore-conflicts-at-my-risk", None, + "no longer needed; don't use this"), + ("build-directory=", "b", + "download/extract/build in DIR; keep the results"), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), + ('record=', None, + "filename in which to record list of installed files"), + ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), + ('site-dirs=','S',"list of directories where .pth files work"), + ('editable', 'e', "Install specified packages in editable form"), + ('no-deps', 'N', "don't install dependencies"), + ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), + ('local-snapshots-ok', 'l', "allow building eggs from local checkouts"), + ] + boolean_options = [ + 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', + 'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable', + 'no-deps', 'local-snapshots-ok', + ] + negative_opt = {'always-unzip': 'zip-ok'} + create_index = PackageIndex + + def initialize_options(self): + self.zip_ok = self.local_snapshots_ok = None + self.install_dir = self.script_dir = self.exclude_scripts = None + self.index_url = None + self.find_links = None + self.build_directory = None + self.args = None + self.optimize = self.record = None + self.upgrade = self.always_copy = self.multi_version = None + self.editable = self.no_deps = self.allow_hosts = None + self.root = self.prefix = self.no_report = None + + # Options not specifiable via command 
line + self.package_index = None + self.pth_file = self.always_copy_from = None + self.delete_conflicting = None + self.ignore_conflicts_at_my_risk = None + self.site_dirs = None + self.installed_projects = {} + self.sitepy_installed = False + # Always read easy_install options, even if we are subclassed, or have + # an independent instance created. This ensures that defaults will + # always come from the standard configuration file(s)' "easy_install" + # section, even if this is a "develop" or "install" command, or some + # other embedding. + self._dry_run = None + self.verbose = self.distribution.verbose + self.distribution._set_command_options( + self, self.distribution.get_option_dict('easy_install') + ) + + def delete_blockers(self, blockers): + for filename in blockers: + if os.path.exists(filename) or os.path.islink(filename): + log.info("Deleting %s", filename) + if not self.dry_run: + if os.path.isdir(filename) and not os.path.islink(filename): + rmtree(filename) + else: + os.unlink(filename) + + def finalize_options(self): + self._expand('install_dir','script_dir','build_directory','site_dirs') + # If a non-default installation directory was specified, default the + # script directory to match it. + if self.script_dir is None: + self.script_dir = self.install_dir + + # Let install_dir get set by install_lib command, which in turn + # gets its info from the install command, and takes into account + # --prefix and --home and all that other crud. + self.set_undefined_options('install_lib', + ('install_dir','install_dir') + ) + # Likewise, set default script_dir from 'install_scripts.install_dir' + self.set_undefined_options('install_scripts', + ('install_dir', 'script_dir') + ) + # default --record from the install command + self.set_undefined_options('install', ('record', 'record')) + normpath = map(normalize_path, sys.path) + self.all_site_dirs = get_site_dirs() + if self.site_dirs is not None: + site_dirs = [ + os.path.expanduser(s.strip()) for s in self.site_dirs.split(',') + ] + for d in site_dirs: + if not os.path.isdir(d): + log.warn("%s (in --site-dirs) does not exist", d) + elif normalize_path(d) not in normpath: + raise DistutilsOptionError( + d+" (in --site-dirs) is not on sys.path" + ) + else: + self.all_site_dirs.append(normalize_path(d)) + if not self.editable: self.check_site_dir() + self.index_url = self.index_url or "http://pypi.python.org/simple" + self.shadow_path = self.all_site_dirs[:] + for path_item in self.install_dir, normalize_path(self.script_dir): + if path_item not in self.shadow_path: + self.shadow_path.insert(0, path_item) + + if self.allow_hosts is not None: + hosts = [s.strip() for s in self.allow_hosts.split(',')] + else: + hosts = ['*'] + if self.package_index is None: + self.package_index = self.create_index( + self.index_url, search_path = self.shadow_path+sys.path, hosts=hosts, + ) + self.local_index = Environment(self.shadow_path+sys.path) + + if self.find_links is not None: + if isinstance(self.find_links, basestring): + self.find_links = self.find_links.split() + else: + self.find_links = [] + if self.local_snapshots_ok: + self.package_index.scan_egg_links(self.shadow_path+sys.path) + self.package_index.add_find_links(self.find_links) + self.set_undefined_options('install_lib', ('optimize','optimize')) + if not isinstance(self.optimize,int): + try: + self.optimize = int(self.optimize) + if not (0 <= self.optimize <= 2): raise ValueError + except ValueError: + raise DistutilsOptionError("--optimize must be 0, 1, or 2") + + if 
self.delete_conflicting and self.ignore_conflicts_at_my_risk: + raise DistutilsOptionError( + "Can't use both --delete-conflicting and " + "--ignore-conflicts-at-my-risk at the same time" + ) + if self.editable and not self.build_directory: + raise DistutilsArgError( + "Must specify a build directory (-b) when using --editable" + ) + if not self.args: + raise DistutilsArgError( + "No urls, filenames, or requirements specified (see --help)") + + self.outputs = [] + + def run(self): + if self.verbose!=self.distribution.verbose: + log.set_verbosity(self.verbose) + try: + for spec in self.args: + self.easy_install(spec, not self.no_deps) + if self.record: + outputs = self.outputs + if self.root: # strip any package prefix + root_len = len(self.root) + for counter in xrange(len(outputs)): + outputs[counter] = outputs[counter][root_len:] + from distutils import file_util + self.execute( + file_util.write_file, (self.record, outputs), + "writing list of installed files to '%s'" % + self.record + ) + self.warn_deprecated_options() + finally: + log.set_verbosity(self.distribution.verbose) + + def pseudo_tempname(self): + """Return a pseudo-tempname base in the install directory. + This code is intentionally naive; if a malicious party can write to + the target directory you're already in deep doodoo. + """ + try: + pid = os.getpid() + except: + pid = random.randint(0,sys.maxint) + return os.path.join(self.install_dir, "test-easy-install-%s" % pid) + + def warn_deprecated_options(self): + if self.delete_conflicting or self.ignore_conflicts_at_my_risk: + log.warn( + "Note: The -D, --delete-conflicting and" + " --ignore-conflicts-at-my-risk no longer have any purpose" + " and should not be used." + ) + + def check_site_dir(self): + """Verify that self.install_dir is .pth-capable dir, if needed""" + instdir = normalize_path(self.install_dir) + pth_file = os.path.join(instdir,'easy-install.pth') + + # mkdir it if necessary + try: + os.makedirs(instdir) + except OSError: + # Oh well -- hopefully this error simply means that it is already there. + # If not the subsequent write test will identify the problem. + pass + # add it to site dirs + self.all_site_dirs.append(instdir) + + # Is it a configured, PYTHONPATH, implicit, or explicit site dir? + is_site_dir = instdir in self.all_site_dirs + + if not is_site_dir and not self.multi_version: + # No? 
Then directly test whether it does .pth file processing + is_site_dir = self.check_pth_processing() + else: + # make sure we can write to target dir + testfile = self.pseudo_tempname()+'.write-test' + test_exists = os.path.exists(testfile) + try: + if test_exists: os.unlink(testfile) + open(testfile,'w').close() + os.unlink(testfile) + except (OSError,IOError): + self.cant_write_to_target() + + if not is_site_dir and not self.multi_version: + # Can't install non-multi to non-site dir + log.warn(self.no_default_version_msg()) + + if is_site_dir: + if self.pth_file is None: + self.pth_file = PthDistributions(pth_file, self.all_site_dirs) + else: + self.pth_file = None + + if self.multi_version and not os.path.exists(pth_file): + self.sitepy_installed = True # don't need site.py in this case + self.pth_file = None # and don't create a .pth file + self.install_dir = instdir + + def cant_write_to_target(self): + msg = """can't create or remove files in install directory + +The following error occurred while trying to add or remove files in the +installation directory: + + %s + +The installation directory you specified (via --install-dir, --prefix, or +the distutils default setting) was: + + %s +""" % (sys.exc_info()[1], self.install_dir,) + + if not os.path.exists(self.install_dir): + msg += """ +This directory does not currently exist. Please create it and try again, or +choose a different installation directory (using the -d or --install-dir +option). +""" + else: + msg += """ +Perhaps your account does not have write access to this directory? If the +installation directory is a system-owned directory, you may need to sign in +as the administrator or "root" account. If you do not have administrative +access to this machine, you may wish to choose a different installation +directory, preferably one that is listed in your PYTHONPATH environment +variable. + +For information on other options, you may wish to consult the +documentation at: + + http://peak.telecommunity.com/EasyInstall.html + +Please make the appropriate changes for your system and try again. +""" + raise DistutilsError(msg) + + + + + def check_pth_processing(self): + """Empirically verify whether .pth files are supported in inst. 
dir""" + instdir = self.install_dir + log.info("Checking .pth file support in %s", instdir) + pth_file = self.pseudo_tempname()+".pth" + ok_file = pth_file+'.ok' + ok_exists = os.path.exists(ok_file) + try: + if ok_exists: os.unlink(ok_file) + f = open(pth_file,'w') + except (OSError,IOError): + self.cant_write_to_target() + else: + try: + f.write("import os;open(%r,'w').write('OK')\n" % (ok_file,)) + f.close(); f=None + executable = sys.executable + if os.name=='nt': + dirname,basename = os.path.split(executable) + alt = os.path.join(dirname,'pythonw.exe') + if basename.lower()=='python.exe' and os.path.exists(alt): + # use pythonw.exe to avoid opening a console window + executable = alt + + from distutils.spawn import spawn + spawn([executable,'-E','-c','pass'],0) + + if os.path.exists(ok_file): + log.info( + "TEST PASSED: %s appears to support .pth files", + instdir + ) + return True + finally: + if f: f.close() + if os.path.exists(ok_file): os.unlink(ok_file) + if os.path.exists(pth_file): os.unlink(pth_file) + if not self.multi_version: + log.warn("TEST FAILED: %s does NOT support .pth files", instdir) + return False + + def install_egg_scripts(self, dist): + """Write all the scripts for `dist`, unless scripts are excluded""" + if not self.exclude_scripts and dist.metadata_isdir('scripts'): + for script_name in dist.metadata_listdir('scripts'): + self.install_script( + dist, script_name, + dist.get_metadata('scripts/'+script_name) + ) + self.install_wrapper_scripts(dist) + + def add_output(self, path): + if os.path.isdir(path): + for base, dirs, files in os.walk(path): + for filename in files: + self.outputs.append(os.path.join(base,filename)) + else: + self.outputs.append(path) + + def not_editable(self, spec): + if self.editable: + raise DistutilsArgError( + "Invalid argument %r: you can't use filenames or URLs " + "with --editable (except via the --find-links option)." 
+ % (spec,) + ) + + def check_editable(self,spec): + if not self.editable: + return + + if os.path.exists(os.path.join(self.build_directory, spec.key)): + raise DistutilsArgError( + "%r already exists in %s; can't do a checkout there" % + (spec.key, self.build_directory) + ) + + + + + + + def easy_install(self, spec, deps=False): + tmpdir = tempfile.mkdtemp(prefix="easy_install-") + download = None + if not self.editable: self.install_site_py() + + try: + if not isinstance(spec,Requirement): + if URL_SCHEME(spec): + # It's a url, download it to tmpdir and process + self.not_editable(spec) + download = self.package_index.download(spec, tmpdir) + return self.install_item(None, download, tmpdir, deps, True) + + elif os.path.exists(spec): + # Existing file or directory, just process it directly + self.not_editable(spec) + return self.install_item(None, spec, tmpdir, deps, True) + else: + spec = parse_requirement_arg(spec) + + self.check_editable(spec) + dist = self.package_index.fetch_distribution( + spec, tmpdir, self.upgrade, self.editable, not self.always_copy, + self.local_index + ) + if dist is None: + msg = "Could not find suitable distribution for %r" % spec + if self.always_copy: + msg+=" (--always-copy skips system and development eggs)" + raise DistutilsError(msg) + elif dist.precedence==DEVELOP_DIST: + # .egg-info dists don't need installing, just process deps + self.process_distribution(spec, dist, deps, "Using") + return dist + else: + return self.install_item(spec, dist.location, tmpdir, deps) + + finally: + if os.path.exists(tmpdir): + rmtree(tmpdir) + + def install_item(self, spec, download, tmpdir, deps, install_needed=False): + + # Installation is also needed if file in tmpdir or is not an egg + install_needed = install_needed or self.always_copy + install_needed = install_needed or os.path.dirname(download) == tmpdir + install_needed = install_needed or not download.endswith('.egg') + install_needed = install_needed or ( + self.always_copy_from is not None and + os.path.dirname(normalize_path(download)) == + normalize_path(self.always_copy_from) + ) + + if spec and not install_needed: + # at this point, we know it's a local .egg, we just don't know if + # it's already installed. 
+ for dist in self.local_index[spec.project_name]: + if dist.location==download: + break + else: + install_needed = True # it's not in the local index + + log.info("Processing %s", os.path.basename(download)) + + if install_needed: + dists = self.install_eggs(spec, download, tmpdir) + for dist in dists: + self.process_distribution(spec, dist, deps) + else: + dists = [self.check_conflicts(self.egg_distribution(download))] + self.process_distribution(spec, dists[0], deps, "Using") + + if spec is not None: + for dist in dists: + if dist in spec: + return dist + + + + + + + def process_distribution(self, requirement, dist, deps=True, *info): + self.update_pth(dist) + self.package_index.add(dist) + self.local_index.add(dist) + self.install_egg_scripts(dist) + self.installed_projects[dist.key] = dist + log.info(self.installation_report(requirement, dist, *info)) + if dist.has_metadata('dependency_links.txt'): + self.package_index.add_find_links( + dist.get_metadata_lines('dependency_links.txt') + ) + if not deps and not self.always_copy: + return + elif requirement is not None and dist.key != requirement.key: + log.warn("Skipping dependencies for %s", dist) + return # XXX this is not the distribution we were looking for + elif requirement is None or dist not in requirement: + # if we wound up with a different version, resolve what we've got + distreq = dist.as_requirement() + requirement = requirement or distreq + requirement = Requirement( + distreq.project_name, distreq.specs, requirement.extras + ) + log.info("Processing dependencies for %s", requirement) + try: + distros = WorkingSet([]).resolve( + [requirement], self.local_index, self.easy_install + ) + except DistributionNotFound, e: + raise DistutilsError( + "Could not find required distribution %s" % e.args + ) + except VersionConflict, e: + raise DistutilsError( + "Installed distribution %s conflicts with requirement %s" + % e.args + ) + if self.always_copy or self.always_copy_from: + # Force all the relevant distros to be copied or activated + for dist in distros: + if dist.key not in self.installed_projects: + self.easy_install(dist.as_requirement()) + log.info("Finished processing dependencies for %s", requirement) + + def should_unzip(self, dist): + if self.zip_ok is not None: + return not self.zip_ok + if dist.has_metadata('not-zip-safe'): + return True + if not dist.has_metadata('zip-safe'): + return True + return False + + def maybe_move(self, spec, dist_filename, setup_base): + dst = os.path.join(self.build_directory, spec.key) + if os.path.exists(dst): + log.warn( + "%r already exists in %s; build directory %s will not be kept", + spec.key, self.build_directory, setup_base + ) + return setup_base + if os.path.isdir(dist_filename): + setup_base = dist_filename + else: + if os.path.dirname(dist_filename)==setup_base: + os.unlink(dist_filename) # get it out of the tmp dir + contents = os.listdir(setup_base) + if len(contents)==1: + dist_filename = os.path.join(setup_base,contents[0]) + if os.path.isdir(dist_filename): + # if the only thing there is a directory, move it instead + setup_base = dist_filename + ensure_directory(dst); shutil.move(setup_base, dst) + return dst + + def install_wrapper_scripts(self, dist): + if not self.exclude_scripts: + for args in get_script_args(dist, script_dir=self.script_dir): + self.write_script(*args) + + + + def install_script(self, dist, script_name, script_text, dev_path=None): + """Generate a legacy script wrapper and install it""" + spec = str(dist.as_requirement()) + is_script = 
is_python_script(script_text, script_name) + + requires = [spec] + [str(r) for r in dist.requires()] + if is_script and dev_path: + script_text = get_script_header(script_text) + ( + "# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r\n" + "__requires__ = %(requires)r\n" + "from pkg_resources import require; require(%(spec)r)\n" + "del require\n" + "__file__ = %(dev_path)r\n" + "execfile(__file__)\n" + ) % locals() + elif is_script: + script_text = get_script_header(script_text) + ( + "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n" + "__requires__ = %(requires)r\n" + "import pkg_resources\n" + "pkg_resources.run_script(%(spec)r, %(script_name)r)\n" + ) % locals() + self.write_script(script_name, script_text, 'b') + + def write_script(self, script_name, contents, mode="t", blockers=()): + """Write an executable file to the scripts directory""" + self.delete_blockers( # clean up old .py/.pyw w/o a script + [os.path.join(self.script_dir,x) for x in blockers]) + log.info("Installing %s script to %s", script_name, self.script_dir) + target = os.path.join(self.script_dir, script_name) + self.add_output(target) + + if not self.dry_run: + ensure_directory(target) + f = open(target,"w"+mode) + f.write(contents) + f.close() + chmod(target,0755) + + + + + def install_eggs(self, spec, dist_filename, tmpdir): + # .egg dirs or files are already built, so just return them + if dist_filename.lower().endswith('.egg'): + return [self.install_egg(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.exe'): + return [self.install_exe(dist_filename, tmpdir)] + + # Anything else, try to extract and build + setup_base = tmpdir + if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): + unpack_archive(dist_filename, tmpdir, self.unpack_progress) + elif os.path.isdir(dist_filename): + setup_base = os.path.abspath(dist_filename) + + if (setup_base.startswith(tmpdir) # something we downloaded + and self.build_directory and spec is not None + ): + setup_base = self.maybe_move(spec, dist_filename, setup_base) + + # Find the setup.py file + setup_script = os.path.join(setup_base, 'setup.py') + + if not os.path.exists(setup_script): + setups = glob(os.path.join(setup_base, '*', 'setup.py')) + if not setups: + raise DistutilsError( + "Couldn't find a setup script in %s" % os.path.abspath(dist_filename) + ) + if len(setups)>1: + raise DistutilsError( + "Multiple setup scripts in %s" % os.path.abspath(dist_filename) + ) + setup_script = setups[0] + + # Now run it, and return the result + if self.editable: + log.info(self.report_editable(spec, setup_script)) + return [] + else: + return self.build_and_install(setup_script, setup_base) + + def egg_distribution(self, egg_path): + if os.path.isdir(egg_path): + metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO')) + else: + metadata = EggMetadata(zipimport.zipimporter(egg_path)) + return Distribution.from_filename(egg_path,metadata=metadata) + + def install_egg(self, egg_path, tmpdir): + destination = os.path.join(self.install_dir,os.path.basename(egg_path)) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + + dist = self.egg_distribution(egg_path) + self.check_conflicts(dist) + if not samefile(egg_path, destination): + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute(os.unlink,(destination,),"Removing "+destination) + uncache_zipdir(destination) + if 
os.path.isdir(egg_path): + if egg_path.startswith(tmpdir): + f,m = shutil.move, "Moving" + else: + f,m = shutil.copytree, "Copying" + elif self.should_unzip(dist): + self.mkpath(destination) + f,m = self.unpack_and_compile, "Extracting" + elif egg_path.startswith(tmpdir): + f,m = shutil.move, "Moving" + else: + f,m = shutil.copy2, "Copying" + + self.execute(f, (egg_path, destination), + (m+" %s to %s") % + (os.path.basename(egg_path),os.path.dirname(destination))) + + self.add_output(destination) + return self.egg_distribution(destination) + + def install_exe(self, dist_filename, tmpdir): + # See if it's valid, get data + cfg = extract_wininst_cfg(dist_filename) + if cfg is None: + raise DistutilsError( + "%s is not a valid distutils Windows .exe" % dist_filename + ) + # Create a dummy distribution object until we build the real distro + dist = Distribution(None, + project_name=cfg.get('metadata','name'), + version=cfg.get('metadata','version'), platform="win32" + ) + + # Convert the .exe to an unpacked egg + egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg') + egg_tmp = egg_path+'.tmp' + egg_info = os.path.join(egg_tmp, 'EGG-INFO') + pkg_inf = os.path.join(egg_info, 'PKG-INFO') + ensure_directory(pkg_inf) # make sure EGG-INFO dir exists + dist._provider = PathMetadata(egg_tmp, egg_info) # XXX + self.exe_to_egg(dist_filename, egg_tmp) + + # Write EGG-INFO/PKG-INFO + if not os.path.exists(pkg_inf): + f = open(pkg_inf,'w') + f.write('Metadata-Version: 1.0\n') + for k,v in cfg.items('metadata'): + if k!='target_version': + f.write('%s: %s\n' % (k.replace('_','-').title(), v)) + f.close() + script_dir = os.path.join(egg_info,'scripts') + self.delete_blockers( # delete entry-point scripts to avoid duping + [os.path.join(script_dir,args[0]) for args in get_script_args(dist)] + ) + # Build .egg file from tmpdir + bdist_egg.make_zipfile( + egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run + ) + # install the .egg + return self.install_egg(egg_path, tmpdir) + + def exe_to_egg(self, dist_filename, egg_tmp): + """Extract a bdist_wininst to the directories an egg would use""" + # Check for .pth file and set up prefix translations + prefixes = get_exe_prefixes(dist_filename) + to_compile = [] + native_libs = [] + top_level = {} + def process(src,dst): + s = src.lower() + for old,new in prefixes: + if s.startswith(old): + src = new+src[len(old):] + parts = src.split('/') + dst = os.path.join(egg_tmp, *parts) + dl = dst.lower() + if dl.endswith('.pyd') or dl.endswith('.dll'): + parts[-1] = bdist_egg.strip_module(parts[-1]) + top_level[os.path.splitext(parts[0])[0]] = 1 + native_libs.append(src) + elif dl.endswith('.py') and old!='SCRIPTS/': + top_level[os.path.splitext(parts[0])[0]] = 1 + to_compile.append(dst) + return dst + if not src.endswith('.pth'): + log.warn("WARNING: can't process %s", src) + return None + # extract, tracking .pyd/.dll->native_libs and .py -> to_compile + unpack_archive(dist_filename, egg_tmp, process) + stubs = [] + for res in native_libs: + if res.lower().endswith('.pyd'): # create stubs for .pyd's + parts = res.split('/') + resource = parts[-1] + parts[-1] = bdist_egg.strip_module(parts[-1])+'.py' + pyfile = os.path.join(egg_tmp, *parts) + to_compile.append(pyfile); stubs.append(pyfile) + bdist_egg.write_stub(resource, pyfile) + self.byte_compile(to_compile) # compile .py's + bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'), + bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag + + for name in 'top_level','native_libs': + 
if locals()[name]: + txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt') + if not os.path.exists(txt): + open(txt,'w').write('\n'.join(locals()[name])+'\n') + + def check_conflicts(self, dist): + """Verify that there are no conflicting "old-style" packages""" + + return dist # XXX temporarily disable until new strategy is stable + from imp import find_module, get_suffixes + from glob import glob + + blockers = [] + names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr + + exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out + for ext,mode,typ in get_suffixes(): + exts[ext] = 1 + + for path,files in expand_paths([self.install_dir]+self.all_site_dirs): + for filename in files: + base,ext = os.path.splitext(filename) + if base in names: + if not ext: + # no extension, check for package + try: + f, filename, descr = find_module(base, [path]) + except ImportError: + continue + else: + if f: f.close() + if filename not in blockers: + blockers.append(filename) + elif ext in exts and base!='site': # XXX ugh + blockers.append(os.path.join(path,filename)) + if blockers: + self.found_conflicts(dist, blockers) + + return dist + + def found_conflicts(self, dist, blockers): + if self.delete_conflicting: + log.warn("Attempting to delete conflicting packages:") + return self.delete_blockers(blockers) + + msg = """\ +------------------------------------------------------------------------- +CONFLICT WARNING: + +The following modules or packages have the same names as modules or +packages being installed, and will be *before* the installed packages in +Python's search path. You MUST remove all of the relevant files and +directories before you will be able to use the package(s) you are +installing: + + %s + +""" % '\n '.join(blockers) + + if self.ignore_conflicts_at_my_risk: + msg += """\ +(Note: you can run EasyInstall on '%s' with the +--delete-conflicting option to attempt deletion of the above files +and/or directories.) +""" % dist.project_name + else: + msg += """\ +Note: you can attempt this installation again with EasyInstall, and use +either the --delete-conflicting (-D) option or the +--ignore-conflicts-at-my-risk option, to either delete the above files +and directories, or to ignore the conflicts, respectively. Note that if +you ignore the conflicts, the installed package(s) may not work. +""" + msg += """\ +------------------------------------------------------------------------- +""" + sys.stderr.write(msg) + sys.stderr.flush() + if not self.ignore_conflicts_at_my_risk: + raise DistutilsError("Installation aborted due to conflicts") + + def installation_report(self, req, dist, what="Installed"): + """Helpful installation message for display to package users""" + msg = "\n%(what)s %(eggloc)s%(extras)s" + if self.multi_version and not self.no_report: + msg += """ + +Because this distribution was installed --multi-version, before you can +import modules from this package in an application, you will need to +'import pkg_resources' and then use a 'require()' call similar to one of +these examples, in order to select the desired version: + + pkg_resources.require("%(name)s") # latest installed version + pkg_resources.require("%(name)s==%(version)s") # this exact version + pkg_resources.require("%(name)s>=%(version)s") # this version or higher +""" + if self.install_dir not in map(normalize_path,sys.path): + msg += """ + +Note also that the installation directory must be on sys.path at runtime for +this to work. (e.g. 
by being the application's script directory, by being on +PYTHONPATH, or by being added to sys.path by your code.) +""" + eggloc = dist.location + name = dist.project_name + version = dist.version + extras = '' # TODO: self.report_extras(req, dist) + return msg % locals() + + def report_editable(self, spec, setup_script): + dirname = os.path.dirname(setup_script) + python = sys.executable + return """\nExtracted editable version of %(spec)s to %(dirname)s + +If it uses setuptools in its setup script, you can activate it in +"development" mode by going to that directory and running:: + + %(python)s setup.py develop + +See the setuptools documentation for the "develop" command for more info. +""" % locals() + + def run_setup(self, setup_script, setup_base, args): + sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) + sys.modules.setdefault('distutils.command.egg_info', egg_info) + + args = list(args) + if self.verbose>2: + v = 'v' * (self.verbose - 1) + args.insert(0,'-'+v) + elif self.verbose<2: + args.insert(0,'-q') + if self.dry_run: + args.insert(0,'-n') + log.info( + "Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args) + ) + try: + run_setup(setup_script, args) + except SystemExit, v: + raise DistutilsError("Setup script exited with %s" % (v.args[0],)) + + def build_and_install(self, setup_script, setup_base): + args = ['bdist_egg', '--dist-dir'] + dist_dir = tempfile.mkdtemp( + prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) + ) + try: + args.append(dist_dir) + self.run_setup(setup_script, setup_base, args) + all_eggs = Environment([dist_dir]) + eggs = [] + for key in all_eggs: + for dist in all_eggs[key]: + eggs.append(self.install_egg(dist.location, setup_base)) + if not eggs and not self.dry_run: + log.warn("No eggs found in %s (setup script problem?)", + dist_dir) + return eggs + finally: + rmtree(dist_dir) + log.set_verbosity(self.verbose) # restore our log verbosity + + def update_pth(self,dist): + if self.pth_file is None: + return + + for d in self.pth_file[dist.key]: # drop old entries + if self.multi_version or d.location != dist.location: + log.info("Removing %s from easy-install.pth file", d) + self.pth_file.remove(d) + if d.location in self.shadow_path: + self.shadow_path.remove(d.location) + + if not self.multi_version: + if dist.location in self.pth_file.paths: + log.info( + "%s is already the active version in easy-install.pth", + dist + ) + else: + log.info("Adding %s to easy-install.pth file", dist) + self.pth_file.add(dist) # add new entry + if dist.location not in self.shadow_path: + self.shadow_path.append(dist.location) + + if not self.dry_run: + + self.pth_file.save() + + if dist.key=='setuptools': + # Ensure that setuptools itself never becomes unavailable! + # XXX should this check for latest version? 
+ filename = os.path.join(self.install_dir,'setuptools.pth') + if os.path.islink(filename): os.unlink(filename) + f = open(filename, 'wt') + f.write(self.pth_file.make_relative(dist.location)+'\n') + f.close() + + def unpack_progress(self, src, dst): + # Progress filter for unpacking + log.debug("Unpacking %s to %s", src, dst) + return dst # only unpack-and-compile skips files for dry run + + def unpack_and_compile(self, egg_path, destination): + to_compile = []; to_chmod = [] + + def pf(src,dst): + if dst.endswith('.py') and not src.startswith('EGG-INFO/'): + to_compile.append(dst) + elif dst.endswith('.dll') or dst.endswith('.so'): + to_chmod.append(dst) + self.unpack_progress(src,dst) + return not self.dry_run and dst or None + + unpack_archive(egg_path, destination, pf) + self.byte_compile(to_compile) + if not self.dry_run: + for f in to_chmod: + mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755 + chmod(f, mode) + + def byte_compile(self, to_compile): + if _dont_write_bytecode: + self.warn('byte-compiling is disabled, skipping.') + return + from distutils.util import byte_compile + try: + # try to make the byte compile messages quieter + log.set_verbosity(self.verbose - 1) + + byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) + if self.optimize: + byte_compile( + to_compile, optimize=self.optimize, force=1, + dry_run=self.dry_run + ) + finally: + log.set_verbosity(self.verbose) # restore original verbosity + + + + + + + + + + def no_default_version_msg(self): + return """bad install directory or PYTHONPATH + +You are attempting to install a package to a directory that is not +on PYTHONPATH and which Python does not read ".pth" files from. The +installation directory you specified (via --install-dir, --prefix, or +the distutils default setting) was: + + %s + +and your PYTHONPATH environment variable currently contains: + + %r + +Here are some of your options for correcting the problem: + +* You can choose a different installation directory, i.e., one that is + on PYTHONPATH or supports .pth files + +* You can add the installation directory to the PYTHONPATH environment + variable. (It must then also be on PYTHONPATH whenever you run + Python and want to use the package(s) you are installing.) + +* You can set up the installation directory to support ".pth" files by + using one of the approaches described here: + + http://peak.telecommunity.com/EasyInstall.html#custom-installation-locations + +Proceeding to install. Please remember that unless you make one of +these changes you will not be able to run the installed code. +""" % ( + self.install_dir, os.environ.get('PYTHONPATH','') + ) + + + + + + + + + + + def install_site_py(self): + """Make sure there's a site.py in the target dir, if needed""" + + if self.sitepy_installed: + return # already did it, or don't need to + + sitepy = os.path.join(self.install_dir, "site.py") + source = resource_string("setuptools", "site-patch.py") + current = "" + + if os.path.exists(sitepy): + log.debug("Checking existing site.py in %s", self.install_dir) + current = open(sitepy,'rb').read() + if not current.startswith('def __boot():'): + print ("\n" + "***********************************************************************\n" + "Warning: %s is not a\n" + "setuptools-generated site.py. 
It will not be overwritten.\n" + "***********************************************************************\n" + ) % (sitepy,) + self.sitepy_installed = True + return + + if current != source: + log.info("Creating %s", sitepy) + if not self.dry_run: + ensure_directory(sitepy) + f = open(sitepy,'wb') + f.write(source) + f.close() + self.byte_compile([sitepy]) + + self.sitepy_installed = True + + + + + + + + + + + + + INSTALL_SCHEMES = dict( + posix = dict( + install_dir = '$base/lib/python$py_version_short/site-packages', + script_dir = '$base/bin', + ), + ) + + DEFAULT_SCHEME = dict( + install_dir = '$base/Lib/site-packages', + script_dir = '$base/Scripts', + ) + + def _expand(self, *attrs): + config_vars = self.get_finalized_command('install').config_vars + + if self.prefix: + # Set default install_dir/scripts from --prefix + config_vars = config_vars.copy() + config_vars['base'] = self.prefix + scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME) + for attr,val in scheme.items(): + if getattr(self,attr,None) is None: + setattr(self,attr,val) + + from distutils.util import subst_vars + for attr in attrs: + val = getattr(self, attr) + if val is not None: + val = subst_vars(val, config_vars) + if os.name == 'posix': + val = os.path.expanduser(val) + setattr(self, attr, val) + + + + + + + + + +def get_site_dirs(): + # return a list of 'site' dirs + sitedirs = filter(None,os.environ.get('PYTHONPATH','').split(os.pathsep)) + prefixes = [sys.prefix] + if sys.exec_prefix != sys.prefix: + prefixes.append(sys.exec_prefix) + for prefix in prefixes: + if prefix: + if sys.platform in ('os2emx', 'riscos'): + sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) + elif os.sep == '/': + sitedirs.extend([os.path.join(prefix, + "lib", + "python" + sys.version[:3], + "site-packages"), + os.path.join(prefix, "lib", "site-python")]) + else: + sitedirs.extend( + [prefix, os.path.join(prefix, "lib", "site-packages")] + ) + if sys.platform == 'darwin': + # for framework builds *only* we add the standard Apple + # locations. 
Currently only per-user, but /Library and
+            # /Network/Library could be added too
+            if 'Python.framework' in prefix:
+                home = os.environ.get('HOME')
+                if home:
+                    sitedirs.append(
+                        os.path.join(home,
+                                     'Library',
+                                     'Python',
+                                     sys.version[:3],
+                                     'site-packages'))
+    for plat_specific in (0,1):
+        site_lib = get_python_lib(plat_specific)
+        if site_lib not in sitedirs: sitedirs.append(site_lib)
+
+    sitedirs = map(normalize_path, sitedirs)
+    return sitedirs
+
+
+def expand_paths(inputs):
+    """Yield sys.path directories that might contain "old-style" packages"""
+
+    seen = {}
+
+    for dirname in inputs:
+        dirname = normalize_path(dirname)
+        if dirname in seen:
+            continue
+
+        seen[dirname] = 1
+        if not os.path.isdir(dirname):
+            continue
+
+        files = os.listdir(dirname)
+        yield dirname, files
+
+        for name in files:
+            if not name.endswith('.pth'):
+                # We only care about the .pth files
+                continue
+            if name in ('easy-install.pth','setuptools.pth'):
+                # Ignore .pth files that we control
+                continue
+
+            # Read the .pth file
+            f = open(os.path.join(dirname,name))
+            lines = list(yield_lines(f))
+            f.close()
+
+            # Yield existing non-dupe, non-import directory lines from it
+            for line in lines:
+                if not line.startswith("import"):
+                    line = normalize_path(line.rstrip())
+                    if line not in seen:
+                        seen[line] = 1
+                        if not os.path.isdir(line):
+                            continue
+                        yield line, os.listdir(line)
+
+
+def extract_wininst_cfg(dist_filename):
+    """Extract configuration data from a bdist_wininst .exe
+
+    Returns a ConfigParser.RawConfigParser, or None
+    """
+    f = open(dist_filename,'rb')
+    try:
+        endrec = zipfile._EndRecData(f)
+        if endrec is None:
+            return None
+
+        prepended = (endrec[9] - endrec[5]) - endrec[6]
+        if prepended < 12:  # no wininst data here
+            return None
+        f.seek(prepended-12)
+
+        import struct, StringIO, ConfigParser
+        tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
+        if tag not in (0x1234567A, 0x1234567B):
+            return None     # not a valid tag
+
+        f.seek(prepended-(12+cfglen+bmlen))
+        cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
+        try:
+            cfg.readfp(StringIO.StringIO(f.read(cfglen).split(chr(0),1)[0]))
+        except ConfigParser.Error:
+            return None
+        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+            return None
+        return cfg
+
+    finally:
+        f.close()
+
+
+def get_exe_prefixes(exe_filename):
+    """Get exe->egg path translations for a given .exe file"""
+
+    prefixes = [
+        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
+        ('PLATLIB/', ''),
+        ('SCRIPTS/', 'EGG-INFO/scripts/')
+    ]
+    z = zipfile.ZipFile(exe_filename)
+    try:
+        for info in z.infolist():
+            name = info.filename
+            parts = name.split('/')
+            if len(parts)==3 and parts[2]=='PKG-INFO':
+                if parts[1].endswith('.egg-info'):
+                    prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
+                    break
+            if len(parts)!=2 or not name.endswith('.pth'):
+                continue
+            if name.endswith('-nspkg.pth'):
+                continue
+            if parts[0].upper() in ('PURELIB','PLATLIB'):
+                for pth in yield_lines(z.read(name)):
+                    pth = pth.strip().replace('\\','/')
+                    if not pth.startswith('import'):
+                        prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
+    finally:
+        z.close()
+    prefixes = [(x.lower(),y) for x, y in prefixes]
+    prefixes.sort(); prefixes.reverse()
+    return prefixes
+
+
+def parse_requirement_arg(spec):
+    try:
+        return Requirement.parse(spec)
+    except ValueError:
+        raise DistutilsError(
+            "Not a URL, existing file, or requirement spec: %r" % (spec,)
+        )
+
+class PthDistributions(Environment):
+    """A .pth file with Distribution paths in it"""
+
+    dirty = False
+
+    def __init__(self, filename, sitedirs=()):
+        self.filename = filename; self.sitedirs=map(normalize_path, sitedirs)
+        self.basedir = normalize_path(os.path.dirname(self.filename))
+        self._load(); Environment.__init__(self, [], None, None)
+        for path in yield_lines(self.paths):
+            map(self.add, find_distributions(path, True))
+
+    def _load(self):
+        self.paths = []
+        saw_import = False
+        seen = dict.fromkeys(self.sitedirs)
+        if os.path.isfile(self.filename):
+            for line in open(self.filename,'rt'):
+ if line.startswith('import'): + saw_import = True + continue + path = line.rstrip() + self.paths.append(path) + if not path.strip() or path.strip().startswith('#'): + continue + # skip non-existent paths, in case somebody deleted a package + # manually, and duplicate paths as well + path = self.paths[-1] = normalize_path( + os.path.join(self.basedir,path) + ) + if not os.path.exists(path) or path in seen: + self.paths.pop() # skip it + self.dirty = True # we cleaned up, so we're dirty now :) + continue + seen[path] = 1 + + if self.paths and not saw_import: + self.dirty = True # ensure anything we touch has import wrappers + while self.paths and not self.paths[-1].strip(): + self.paths.pop() + + def save(self): + """Write changed .pth file back to disk""" + if not self.dirty: + return + + data = '\n'.join(map(self.make_relative,self.paths)) + if data: + log.debug("Saving %s", self.filename) + data = ( + "import sys; sys.__plen = len(sys.path)\n" + "%s\n" + "import sys; new=sys.path[sys.__plen:];" + " del sys.path[sys.__plen:];" + " p=getattr(sys,'__egginsert',len(os.environ.get('PYTHONPATH','').split(os.pathsep))); sys.path[p:p]=new;" + " sys.__egginsert = p+len(new)\n" + ) % data + + if os.path.islink(self.filename): + os.unlink(self.filename) + f = open(self.filename,'wb') + f.write(data); f.close() + + elif os.path.exists(self.filename): + log.debug("Deleting empty %s", self.filename) + os.unlink(self.filename) + + self.dirty = False + + def add(self,dist): + """Add `dist` to the distribution map""" + if dist.location not in self.paths and dist.location not in self.sitedirs: + self.paths.append(dist.location); self.dirty = True + Environment.add(self,dist) + + def remove(self,dist): + """Remove `dist` from the distribution map""" + while dist.location in self.paths: + self.paths.remove(dist.location); self.dirty = True + Environment.remove(self,dist) + + + def make_relative(self,path): + npath, last = os.path.split(normalize_path(path)) + baselen = len(self.basedir) + parts = [last] + sep = os.altsep=='/' and '/' or os.sep + while len(npath)>=baselen: + if npath==self.basedir: + parts.append(os.curdir) + parts.reverse() + return sep.join(parts) + npath, last = os.path.split(npath) + parts.append(last) + else: + return path + +def get_script_header(script_text, executable=sys_executable, wininst=False): + """Create a #! 
line, getting options (if any) from script_text"""
+    from distutils.command.build_scripts import first_line_re
+    first = (script_text+'\n').splitlines()[0]
+    match = first_line_re.match(first)
+    options = ''
+    if match:
+        options = match.group(1) or ''
+        if options: options = ' '+options
+    if wininst:
+        executable = "python.exe"
+    else:
+        executable = nt_quote_arg(executable)
+    hdr = "#!%(executable)s%(options)s\n" % locals()
+    if unicode(hdr,'ascii','ignore').encode('ascii') != hdr:
+        # Non-ascii path to sys.executable, use -x to prevent warnings
+        if options:
+            if options.strip().startswith('-'):
+                options = ' -x'+options.strip()[1:]
+            # else: punt, we can't do it, let the warning happen anyway
+        else:
+            options = ' -x'
+    executable = fix_jython_executable(executable, options)
+    hdr = "#!%(executable)s%(options)s\n" % locals()
+    return hdr
+
+def auto_chmod(func, arg, exc):
+    if func is os.remove and os.name=='nt':
+        chmod(arg, stat.S_IWRITE)
+        return func(arg)
+    exc = sys.exc_info()
+    raise exc[0], (exc[1][0], exc[1][1] + (" %s %s" % (func,arg)))
+
+def uncache_zipdir(path):
+    """Ensure that the importer caches dont have stale info for `path`"""
+    from zipimport import _zip_directory_cache as zdc
+    _uncache(path, zdc)
+    _uncache(path, sys.path_importer_cache)
+
+def _uncache(path, cache):
+    if path in cache:
+        del cache[path]
+    else:
+        path = normalize_path(path)
+        for p in cache:
+            if normalize_path(p)==path:
+                del cache[p]
+                return
+
+def is_python(text, filename='<string>'):
+    "Is this string a valid Python script?"
+    try:
+        compile(text, filename, 'exec')
+    except (SyntaxError, TypeError):
+        return False
+    else:
+        return True
+
+def is_sh(executable):
+    """Determine if the specified executable is a .sh (contains a #! line)"""
+    try:
+        fp = open(executable)
+        magic = fp.read(2)
+        fp.close()
+    except (OSError,IOError): return executable
+    return magic == '#!'
+
+def nt_quote_arg(arg):
+    """Quote a command line argument according to Windows parsing rules"""
+
+    result = []
+    needquote = False
+    nb = 0
+
+    needquote = (" " in arg) or ("\t" in arg)
+    if needquote:
+        result.append('"')
+
+    for c in arg:
+        if c == '\\':
+            nb += 1
+        elif c == '"':
+            # double preceding backslashes, then add a \"
+            result.append('\\' * (nb*2) + '\\"')
+            nb = 0
+        else:
+            if nb:
+                result.append('\\' * nb)
+                nb = 0
+            result.append(c)
+
+    if nb:
+        result.append('\\' * nb)
+
+    if needquote:
+        result.append('\\' * nb) # double the trailing backslashes
+        result.append('"')
+
+    return ''.join(result)
+
+
+
+
+
+
+
+
+
+def is_python_script(script_text, filename):
+    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
+    """
+    if filename.endswith('.py') or filename.endswith('.pyw'):
+        return True # extension says it's Python
+    if is_python(script_text, filename):
+        return True # it's syntactically valid Python
+    if script_text.startswith('#!'):
+        # It begins with a '#!'
line, so check if 'python' is in it somewhere + return 'python' in script_text.splitlines()[0].lower() + + return False # Not any Python I can recognize + +try: + from os import chmod as _chmod +except ImportError: + # Jython compatibility + def _chmod(*args): pass + +def chmod(path, mode): + log.debug("changing mode of %s to %o", path, mode) + try: + _chmod(path, mode) + except os.error, e: + log.debug("chmod failed: %s", e) + +def fix_jython_executable(executable, options): + if sys.platform.startswith('java') and is_sh(executable): + # Workaround Jython's sys.executable being a .sh (an invalid + # shebang line interpreter) + if options: + # Can't apply the workaround, leave it broken + log.warn("WARNING: Unable to adapt shebang line for Jython," + " the following script is NOT executable\n" + " see http://bugs.jython.org/issue1112 for" + " more information.") + else: + return '/usr/bin/env %s' % executable + return executable + + +def get_script_args(dist, executable=sys_executable, wininst=False, script_dir=None): + """Yield write_script() argument tuples for a distribution's entrypoints""" + spec = str(dist.as_requirement()) + requires = [spec] + [str(r) for r in dist.requires()] + header = get_script_header("", executable, wininst) + generated_by = "# generated by zetuptoolz %s" % (setuptools_version,) + + for group in 'console_scripts', 'gui_scripts': + for name, ep in dist.get_entry_map(group).items(): + script_head, script_tail = (( + "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n" + "%(generated_by)s\n" + "__requires__ = %(requires)r\n" + "import sys\n" + "from pkg_resources import load_entry_point\n" + "\n" + ) % locals(), ( + "sys.exit(\n" + " load_entry_point(%(spec)r, %(group)r, %(name)r)()\n" + ")\n" + ) % locals()) + + if wininst or sys.platform == "win32": + # On Windows/wininst, add a .py[w] extension. Delete any existing + # -script.py[w], .exe, and .exe.manifest. + if group=='gui_scripts': + ext = '.pyw' + old = ['','.pyw','-script.pyw','.exe','.exe.manifest'] + which_python = 'pythonw.exe' + new_header = re.sub('(?i)python.exe', which_python, header) + else: + ext = '.pyscript' + old = ['','.pyscript','.py','.pyc','.pyo','-script.py','.exe','.exe.manifest'] + which_python = 'python.exe' + new_header = re.sub('(?i)pythonw.exe', which_python, header) + + len_ext = len(ext) + script_head += ( + "# If this script doesn't work for you, make sure that the %(ext)s\n" + "# extension is included in the PATHEXT environment variable, and is\n" + "# associated with %(which_python)s in the registry.\n" + "\n" + "if sys.argv[0].endswith(%(ext)r):\n" + " sys.argv[0] = sys.argv[0][:-%(len_ext)r]\n" + "\n" + ) % locals() + + if os.path.exists(new_header[2:-1]) or sys.platform != 'win32': + hdr = new_header + else: + hdr = header + yield (name+ext, hdr + script_head + script_tail, 't', [name+x for x in old]) + + # Also write a shell script that runs the .pyscript, for cygwin. + # + # We can't use a Python script, because the Python interpreter that we want + # to use is the native Windows one, which won't understand a cygwin path. + # Windows paths written with forward slashes are universally understood + # (by native Python, cygwin Python, and bash), so we'll use 'cygpath -m' to + # get the directory from which the script was run in that form. This makes + # the cygwin script and .pyscript position-independent, provided they are + # in the same directory. 
+ + def quote_path(s): + return "\\'".join("'" + p.replace('\\', '/') + "'" for p in s.split("'")) + + pyscript = quote_path("/"+name+ext) + python_path = quote_path(sys.executable) + shell_script_text = ( + '#!/bin/sh\n' + '%(generated_by)s\n' + '\n' + 'ScriptDir=`cygpath -m "$0/.."`\n' + '%(python_path)s "${ScriptDir}"%(pyscript)s "$@"\n' + ) % locals() + yield (name, shell_script_text, 'b') + else: + # On other platforms, we assume the right thing to do is to + # just write the stub with no extension. + yield (name, header + script_head + script_tail) + + +def rmtree(path, ignore_errors=False, onerror=auto_chmod): + """Recursively delete a directory tree. + + This code is taken from the Python 2.4 version of 'shutil', because + the 2.3 version doesn't really work right. + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + names = [] + try: + names = os.listdir(path) + except os.error, err: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error, err: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + +def bootstrap(): + # This function is called when setuptools*.egg is run using /bin/sh + import setuptools; argv0 = os.path.dirname(setuptools.__path__[0]) + sys.argv[0] = argv0; sys.argv.append(argv0); main() + + +def main(argv=None, **kw): + from setuptools import setup + from setuptools.dist import Distribution + import distutils.core + + USAGE = """\ +usage: %(script)s [options] requirement_or_url ... 
+ or: %(script)s --help +""" + + def gen_usage (script_name): + script = os.path.basename(script_name) + return USAGE % vars() + + def with_ei_usage(f): + old_gen_usage = distutils.core.gen_usage + try: + distutils.core.gen_usage = gen_usage + return f() + finally: + distutils.core.gen_usage = old_gen_usage + + class DistributionWithoutHelpCommands(Distribution): + common_usage = "" + def _show_help(self,*args,**kw): + with_ei_usage(lambda: Distribution._show_help(self,*args,**kw)) + + if argv is None: + argv = sys.argv[1:] + + with_ei_usage(lambda: + setup( + script_args = ['-q','easy_install', '-v']+argv, + script_name = sys.argv[0] or 'easy_install', + distclass=DistributionWithoutHelpCommands, **kw + ) + ) + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/egg_info.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/egg_info.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/egg_info.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/egg_info.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,451 @@ +"""setuptools.command.egg_info + +Create a distribution's .egg-info directory and contents""" + +# This module should be kept compatible with Python 2.3 +import os, re +from setuptools import Command +from distutils.errors import * +from distutils import log +from setuptools.command.sdist import sdist +from distutils.util import convert_path +from distutils.filelist import FileList +from pkg_resources import parse_requirements, safe_name, parse_version, \ + safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename +from sdist import walk_revctrl + +class egg_info(Command): + description = "create a distribution's .egg-info directory" + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ('tag-svn-revision', 'r', + "Add subversion revision ID to version number"), + ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), + ('tag-build=', 'b', "Specify explicit tag to add to version number"), + ('no-svn-revision', 'R', + "Don't add subversion revision ID [default]"), + ('no-date', 'D', "Don't include date stamp [default]"), + ] + + boolean_options = ['tag-date', 'tag-svn-revision'] + negative_opt = {'no-svn-revision': 'tag-svn-revision', + 'no-date': 'tag-date'} + + + + + + + + def initialize_options(self): + self.egg_name = None + self.egg_version = None + self.egg_base = None + self.egg_info = None + self.tag_build = None + self.tag_svn_revision = 0 + self.tag_date = 0 + self.broken_egg_info = False + self.vtags = None + + def save_version_info(self, filename): + from setopt import edit_config + edit_config( + filename, + {'egg_info': + {'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()} + } + ) + + + + + + + + + + + + + + + + + + + + + + + def finalize_options (self): + self.egg_name = safe_name(self.distribution.get_name()) + self.vtags = self.tags() + self.egg_version = self.tagged_version() + + try: + list( + parse_requirements('%s==%s' % (self.egg_name,self.egg_version)) + ) + except ValueError: + raise DistutilsOptionError( + "Invalid distribution name or version syntax: %s-%s" % + (self.egg_name,self.egg_version) + ) + + if self.egg_base is None: + dirs = self.distribution.package_dir + self.egg_base = (dirs or {}).get('',os.curdir) + + self.ensure_dirname('egg_base') + self.egg_info = to_filename(self.egg_name)+'.egg-info' + if self.egg_base != os.curdir: + self.egg_info = os.path.join(self.egg_base, self.egg_info) + if '-' in self.egg_name: self.check_broken_egg_info() + + # Set package version for the benefit of dumber commands + # (e.g. sdist, bdist_wininst, etc.) + # + self.distribution.metadata.version = self.egg_version + + # If we bootstrapped around the lack of a PKG-INFO, as might be the + # case in a fresh checkout, make sure that any special tags get added + # to the version info + # + pd = self.distribution._patched_dist + if pd is not None and pd.key==self.egg_name.lower(): + pd._version = self.egg_version + pd._parsed_version = parse_version(self.egg_version) + self.distribution._patched_dist = None + + + def write_or_delete_file(self, what, filename, data, force=False): + """Write `data` to `filename` or delete if empty + + If `data` is non-empty, this routine is the same as ``write_file()``. + If `data` is empty but not ``None``, this is the same as calling + ``delete_file(filename)`. If `data` is ``None``, then this is a no-op + unless `filename` exists, in which case a warning is issued about the + orphaned file (if `force` is false), or deleted (if `force` is true). + """ + if data: + self.write_file(what, filename, data) + elif os.path.exists(filename): + if data is None and not force: + log.warn( + "%s not set in setup(), but %s exists", what, filename + ) + return + else: + self.delete_file(filename) + + def write_file(self, what, filename, data): + """Write `data` to `filename` (if not a dry run) after announcing it + + `what` is used in a log message to identify what is being written + to the file. 
+ """ + log.info("writing %s to %s", what, filename) + if not self.dry_run: + f = open(filename, 'wb') + f.write(data) + f.close() + + def delete_file(self, filename): + """Delete `filename` (if not a dry run) after announcing it""" + log.info("deleting %s", filename) + if not self.dry_run: + os.unlink(filename) + + def tagged_version(self): + return safe_version(self.distribution.get_version() + self.vtags) + + def run(self): + self.mkpath(self.egg_info) + installer = self.distribution.fetch_build_egg + for ep in iter_entry_points('egg_info.writers'): + writer = ep.load(installer=installer) + writer(self, ep.name, os.path.join(self.egg_info,ep.name)) + + # Get rid of native_libs.txt if it was put there by older bdist_egg + nl = os.path.join(self.egg_info, "native_libs.txt") + if os.path.exists(nl): + self.delete_file(nl) + + self.find_sources() + + def tags(self): + version = '' + if self.tag_build: + version+=self.tag_build + if self.tag_svn_revision and ( + os.path.exists('.svn') or os.path.exists('PKG-INFO') + ): version += '-r%s' % self.get_svn_revision() + if self.tag_date: + import time; version += time.strftime("-%Y%m%d") + return version + + + + + + + + + + + + + + + + + + def get_svn_revision(self): + revision = 0 + urlre = re.compile('url="([^"]+)"') + revre = re.compile('committed-rev="(\d+)"') + + for base,dirs,files in os.walk(os.curdir): + if '.svn' not in dirs: + dirs[:] = [] + continue # no sense walking uncontrolled subdirs + dirs.remove('.svn') + f = open(os.path.join(base,'.svn','entries')) + data = f.read() + f.close() + + if data.startswith('9 and d[9]]+[0]) + if base==os.curdir: + base_url = dirurl+'/' # save the root url + elif not dirurl.startswith(base_url): + dirs[:] = [] + continue # not part of the same svn tree, skip it + revision = max(revision, localrev) + + return str(revision or get_pkg_info_revision()) + + + + + def find_sources(self): + """Generate SOURCES.txt manifest file""" + manifest_filename = os.path.join(self.egg_info,"SOURCES.txt") + mm = manifest_maker(self.distribution) + mm.manifest = manifest_filename + mm.run() + self.filelist = mm.filelist + + def check_broken_egg_info(self): + bei = self.egg_name+'.egg-info' + if self.egg_base != os.curdir: + bei = os.path.join(self.egg_base, bei) + if os.path.exists(bei): + log.warn( + "-"*78+'\n' + "Note: Your current .egg-info directory has a '-' in its name;" + '\nthis will not work correctly with "setup.py develop".\n\n' + 'Please rename %s to %s to correct this problem.\n'+'-'*78, + bei, self.egg_info + ) + self.broken_egg_info = self.egg_info + self.egg_info = bei # make it work for now + +class FileList(FileList): + """File list that accepts only existing, platform-independent paths""" + + def append(self, item): + if item.endswith('\r'): # Fix older sdists built on Windows + item = item[:-1] + path = convert_path(item) + if os.path.exists(path): + self.files.append(path) + + + + + + + + + +class manifest_maker(sdist): + + template = "MANIFEST.in" + + def initialize_options (self): + self.use_defaults = 1 + self.prune = 1 + self.manifest_only = 1 + self.force_manifest = 1 + + def finalize_options(self): + pass + + def run(self): + self.filelist = FileList() + if not os.path.exists(self.manifest): + self.write_manifest() # it must exist so it'll get in the list + self.filelist.findall() + self.add_defaults() + if os.path.exists(self.template): + self.read_template() + self.prune_file_list() + self.filelist.sort() + self.filelist.remove_duplicates() + self.write_manifest() + + def write_manifest 
(self): + """Write the file list in 'self.filelist' (presumably as filled in + by 'add_defaults()' and 'read_template()') to the manifest file + named by 'self.manifest'. + """ + files = self.filelist.files + if os.sep!='/': + files = [f.replace(os.sep,'/') for f in files] + self.execute(write_file, (self.manifest, files), + "writing manifest file '%s'" % self.manifest) + + def warn(self, msg): # suppress missing-file warnings from sdist + if not msg.startswith("standard file not found:"): + sdist.warn(self, msg) + + def add_defaults(self): + sdist.add_defaults(self) + self.filelist.append(self.template) + self.filelist.append(self.manifest) + rcfiles = list(walk_revctrl()) + if rcfiles: + self.filelist.extend(rcfiles) + elif os.path.exists(self.manifest): + self.read_manifest() + ei_cmd = self.get_finalized_command('egg_info') + self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) + + def prune_file_list (self): + build = self.get_finalized_command('build') + base_dir = self.distribution.get_fullname() + self.filelist.exclude_pattern(None, prefix=build.build_base) + self.filelist.exclude_pattern(None, prefix=base_dir) + sep = re.escape(os.sep) + self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1) + + +def write_file (filename, contents): + """Create a file with the specified name and write 'contents' (a + sequence of strings without line terminators) to it. + """ + f = open(filename, "wb") # always write POSIX-style manifest + f.write("\n".join(contents)) + f.close() + + + + + + + + + + + + + +def write_pkg_info(cmd, basename, filename): + log.info("writing %s", filename) + if not cmd.dry_run: + metadata = cmd.distribution.metadata + metadata.version, oldver = cmd.egg_version, metadata.version + metadata.name, oldname = cmd.egg_name, metadata.name + try: + # write unescaped data to PKG-INFO, so older pkg_resources + # can still parse it + metadata.write_pkg_info(cmd.egg_info) + finally: + metadata.name, metadata.version = oldname, oldver + + safe = getattr(cmd.distribution,'zip_safe',None) + import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe) + +def warn_depends_obsolete(cmd, basename, filename): + if os.path.exists(filename): + log.warn( + "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." 
+ ) + + +def write_requirements(cmd, basename, filename): + dist = cmd.distribution + data = ['\n'.join(yield_lines(dist.install_requires or ()))] + for extra,reqs in (dist.extras_require or {}).items(): + data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs)))) + cmd.write_or_delete_file("requirements", filename, ''.join(data)) + +def write_toplevel_names(cmd, basename, filename): + pkgs = dict.fromkeys( + [k.split('.',1)[0] + for k in cmd.distribution.iter_distribution_names() + ] + ) + cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n') + + + +def overwrite_arg(cmd, basename, filename): + write_arg(cmd, basename, filename, True) + +def write_arg(cmd, basename, filename, force=False): + argname = os.path.splitext(basename)[0] + value = getattr(cmd.distribution, argname, None) + if value is not None: + value = '\n'.join(value)+'\n' + cmd.write_or_delete_file(argname, filename, value, force) + +def write_entries(cmd, basename, filename): + ep = cmd.distribution.entry_points + + if isinstance(ep,basestring) or ep is None: + data = ep + elif ep is not None: + data = [] + for section, contents in ep.items(): + if not isinstance(contents,basestring): + contents = EntryPoint.parse_group(section, contents) + contents = '\n'.join(map(str,contents.values())) + data.append('[%s]\n%s\n\n' % (section,contents)) + data = ''.join(data) + + cmd.write_or_delete_file('entry points', filename, data, True) + +def get_pkg_info_revision(): + # See if we can get a -r### off of PKG-INFO, in case this is an sdist of + # a subversion revision + # + if os.path.exists('PKG-INFO'): + f = open('PKG-INFO','rU') + for line in f: + match = re.match(r"Version:.*-r(\d+)\s*$", line) + if match: + return int(match.group(1)) + return 0 + + + +# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,123 @@ +import setuptools, sys, glob +from distutils.command.install import install as _install +from distutils.errors import DistutilsArgError + +class install(_install): + """Use easy_install to install the package, w/dependencies""" + + user_options = _install.user_options + [ + ('old-and-unmanageable', None, "Try not to use this!"), + ('single-version-externally-managed', None, + "used by system package builders to create 'flat' eggs"), + ] + boolean_options = _install.boolean_options + [ + 'old-and-unmanageable', 'single-version-externally-managed', + ] + new_commands = [ + ('install_egg_info', lambda self: True), + ('install_scripts', lambda self: True), + ] + _nc = dict(new_commands) + sub_commands = [ + cmd for cmd in _install.sub_commands if cmd[0] not in _nc + ] + new_commands + + def initialize_options(self): + _install.initialize_options(self) + self.old_and_unmanageable = None + self.single_version_externally_managed = None + self.no_compile = None # make DISTUTILS_DEBUG work right! 
+ + def finalize_options(self): + _install.finalize_options(self) + if self.root: + self.single_version_externally_managed = True + elif self.single_version_externally_managed: + if not self.root and not self.record: + raise DistutilsArgError( + "You must specify --record or --root when building system" + " packages" + ) + + def handle_extra_path(self): + if self.root or self.single_version_externally_managed: + # explicit backward-compatibility mode, allow extra_path to work + return _install.handle_extra_path(self) + + # Ignore extra_path when installing an egg (or being run by another + # command without --root or --single-version-externally-managed + self.path_file = None + self.extra_dirs = '' + + def run(self): + self.old_run() + if sys.platform == "win32": + from setuptools.command.scriptsetup import do_scriptsetup + do_scriptsetup() + + def old_run(self): + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return _install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(2) + caller_module = caller.f_globals.get('__name__','') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + _install.run(self) + else: + self.do_egg_install() + + def do_egg_install(self): + + easy_install = self.distribution.get_command_class('easy_install') + + cmd = easy_install( + self.distribution, args="x", root=self.root, record=self.record, + ) + cmd.ensure_finalized() # finalize before bdist_egg munges install cmd + cmd.always_copy_from = '.' 
# make sure local-dir eggs get installed + + # pick up setup-dir .egg files only: no .egg-info + cmd.package_index.scan(glob.glob('*.egg')) + + self.run_command('bdist_egg') + args = [self.distribution.get_command_obj('bdist_egg').egg_output] + + if setuptools.bootstrap_install_from: + # Bootstrap self-installation of setuptools + args.insert(0, setuptools.bootstrap_install_from) + + cmd.args = args + cmd.run() + setuptools.bootstrap_install_from = None + + + + + + + + + + + + + + + + + +# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_egg_info.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_egg_info.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_egg_info.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_egg_info.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,123 @@ +from setuptools import Command +from setuptools.archive_util import unpack_archive +from distutils import log, dir_util +import os, shutil, pkg_resources + +class install_egg_info(Command): + """Install an .egg-info directory for the package""" + + description = "Install an .egg-info directory for the package" + + user_options = [ + ('install-dir=', 'd', "directory to install to"), + ] + + def initialize_options(self): + self.install_dir = None + + def finalize_options(self): + self.set_undefined_options('install_lib',('install_dir','install_dir')) + ei_cmd = self.get_finalized_command("egg_info") + basename = pkg_resources.Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version + ).egg_name()+'.egg-info' + self.source = ei_cmd.egg_info + self.target = os.path.join(self.install_dir, basename) + self.outputs = [self.target] + + def run(self): + self.run_command('egg_info') + target = self.target + if os.path.isdir(self.target) and not os.path.islink(self.target): + dir_util.remove_tree(self.target, dry_run=self.dry_run) + elif os.path.exists(self.target): + self.execute(os.unlink,(self.target,),"Removing "+self.target) + if not self.dry_run: + pkg_resources.ensure_directory(self.target) + self.execute(self.copytree, (), + "Copying %s to %s" % (self.source, self.target) + ) + self.install_namespaces() + + def get_outputs(self): + return self.outputs + + def copytree(self): + # Copy the .egg-info tree to site-packages + def skimmer(src,dst): + # filter out source-control directories; note that 'src' is always + # a '/'-separated path, regardless of platform. 'dst' is a + # platform-specific path. + for skip in '.svn/','CVS/': + if src.startswith(skip) or '/'+skip in src: + return None + self.outputs.append(dst) + log.debug("Copying %s to %s", src, dst) + return dst + unpack_archive(self.source, self.target, skimmer) + + + + + + + + + + + + + + + + + + + + + + + + + + def install_namespaces(self): + nsp = self._get_all_ns_packages() + if not nsp: return + filename,ext = os.path.splitext(self.target) + filename += '-nspkg.pth'; self.outputs.append(filename) + log.info("Installing %s",filename) + if not self.dry_run: + f = open(filename,'wb') + for pkg in nsp: + pth = tuple(pkg.split('.')) + trailer = '\n' + if '.' 
in pkg: + trailer = ( + "; m and setattr(sys.modules[%r], %r, m)\n" + % ('.'.join(pth[:-1]), pth[-1]) + ) + f.write( + "import sys,new,os; " + "p = os.path.join(sys._getframe(1).f_locals['sitedir'], " + "*%(pth)r); " + "ie = os.path.exists(os.path.join(p,'__init__.py')); " + "m = not ie and " + "sys.modules.setdefault(%(pkg)r,new.module(%(pkg)r)); " + "mp = (m or []) and m.__dict__.setdefault('__path__',[]); " + "(p not in mp) and mp.append(p)%(trailer)s" + % locals() + ) + f.close() + + def _get_all_ns_packages(self): + nsp = {} + for pkg in self.distribution.namespace_packages or []: + pkg = pkg.split('.') + while pkg: + nsp['.'.join(pkg)] = 1 + pkg.pop() + nsp=list(nsp) + nsp.sort() # set up shorter names first + return nsp + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_lib.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_lib.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_lib.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_lib.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,76 @@ +from distutils.command.install_lib import install_lib as _install_lib +import os + +class install_lib(_install_lib): + """Don't add compiled flags to filenames of non-Python files""" + + def _bytecode_filenames (self, py_filenames): + bytecode_files = [] + for py_file in py_filenames: + if not py_file.endswith('.py'): + continue + if self.compile: + bytecode_files.append(py_file + "c") + if self.optimize > 0: + bytecode_files.append(py_file + "o") + + return bytecode_files + + def run(self): + self.build() + outfiles = self.install() + if outfiles is not None: + # always compile, in case we have any extension stubs to deal with + self.byte_compile(outfiles) + + def get_exclusions(self): + exclude = {} + nsp = self.distribution.namespace_packages + + if (nsp and self.get_finalized_command('install') + .single_version_externally_managed + ): + for pkg in nsp: + parts = pkg.split('.') + while parts: + pkgdir = os.path.join(self.install_dir, *parts) + for f in '__init__.py', '__init__.pyc', '__init__.pyo': + exclude[os.path.join(pkgdir,f)] = 1 + parts.pop() + return exclude + + def copy_tree( + self, infile, outfile, + preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 + ): + assert preserve_mode and preserve_times and not preserve_symlinks + exclude = self.get_exclusions() + + if not exclude: + return _install_lib.copy_tree(self, infile, outfile) + + # Exclude namespace package __init__.py* files from the output + + from setuptools.archive_util import unpack_directory + from distutils import log + + outfiles = [] + + def pf(src, dst): + if dst in exclude: + log.warn("Skipping installation of %s (namespace package)",dst) + return False + + log.info("copying %s -> %s", src, os.path.dirname(dst)) + outfiles.append(dst) + return dst + + unpack_directory(infile, outfile, pf) + return outfiles + + def get_outputs(self): + outputs = _install_lib.get_outputs(self) + exclude = self.get_exclusions() + if exclude: + return [f for f in outputs if f not in exclude] + return outputs diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_scripts.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_scripts.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/install_scripts.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/install_scripts.py 
2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,82 @@ +from distutils.command.install_scripts import install_scripts \ + as _install_scripts +from easy_install import get_script_args, sys_executable, chmod +from pkg_resources import Distribution, PathMetadata, ensure_directory +import os +from distutils import log + +class install_scripts(_install_scripts): + """Do normal script install, plus any egg_info wrapper scripts""" + + def initialize_options(self): + _install_scripts.initialize_options(self) + self.no_ep = False + + def run(self): + self.run_command("egg_info") + if self.distribution.scripts: + _install_scripts.run(self) # run first to set up self.outfiles + else: + self.outfiles = [] + if self.no_ep: + # don't install entry point scripts into .egg file! + return + + ei_cmd = self.get_finalized_command("egg_info") + dist = Distribution( + ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), + ei_cmd.egg_name, ei_cmd.egg_version, + ) + bs_cmd = self.get_finalized_command('build_scripts') + executable = getattr(bs_cmd,'executable',sys_executable) + is_wininst = getattr( + self.get_finalized_command("bdist_wininst"), '_is_running', False + ) + for args in get_script_args(dist, executable, is_wininst): + self.write_script(*args) + + + + + + def write_script(self, script_name, contents, mode="t", *ignored): + """Write an executable file to the scripts directory""" + log.info("Installing %s script to %s", script_name, self.install_dir) + target = os.path.join(self.install_dir, script_name) + self.outfiles.append(target) + + if not self.dry_run: + ensure_directory(target) + f = open(target,"w"+mode) + f.write(contents) + f.close() + chmod(target,0755) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/register.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/register.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/register.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/register.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,10 @@ +from distutils.command.register import register as _register + +class register(_register): + __doc__ = _register.__doc__ + + def run(self): + # Make sure that we are using valid current name/version info + self.run_command('egg_info') + _register.run(self) + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/rotate.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/rotate.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/rotate.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/rotate.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,57 @@ +import distutils, os +from setuptools import Command +from distutils.util import convert_path +from distutils import log +from distutils.errors import * + +class rotate(Command): + """Delete older distributions""" + + description = "delete older distributions, keeping N newest files" + user_options = [ + ('match=', 'm', "patterns to match (required)"), + ('dist-dir=', 'd', "directory where the distributions are"), + ('keep=', 'k', "number of matching distributions to keep"), + ] + + boolean_options = [] + + def initialize_options(self): + self.match = None + self.dist_dir = None + self.keep = None + + def finalize_options(self): + if self.match is None: + raise DistutilsOptionError( + "Must specify one or more (comma-separated) 
match patterns " + "(e.g. '.zip' or '.egg')" + ) + if self.keep is None: + raise DistutilsOptionError("Must specify number of files to keep") + try: + self.keep = int(self.keep) + except ValueError: + raise DistutilsOptionError("--keep must be an integer") + if isinstance(self.match, basestring): + self.match = [ + convert_path(p.strip()) for p in self.match.split(',') + ] + self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) + + def run(self): + self.run_command("egg_info") + from glob import glob + for pattern in self.match: + pattern = self.distribution.get_name()+'*'+pattern + files = glob(os.path.join(self.dist_dir,pattern)) + files = [(os.path.getmtime(f),f) for f in files] + files.sort() + files.reverse() + + log.info("%d file(s) matching %s", len(files), pattern) + files = files[self.keep:] + for (t,f) in files: + log.info("Deleting %s", f) + if not self.dry_run: + os.unlink(f) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/saveopts.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/saveopts.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/saveopts.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/saveopts.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,24 @@ +import distutils, os +from setuptools import Command +from setuptools.command.setopt import edit_config, option_base + +class saveopts(option_base): + """Save command-line options to a file""" + + description = "save supplied options to setup.cfg or other config file" + + def run(self): + dist = self.distribution + commands = dist.command_options.keys() + settings = {} + + for cmd in commands: + + if cmd=='saveopts': + continue # don't save our own options! + + for opt,(src,val) in dist.get_option_dict(cmd).items(): + if src=="command line": + settings.setdefault(cmd,{})[opt] = val + + edit_config(self.filename, settings, self.dry_run) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/scriptsetup.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/scriptsetup.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/scriptsetup.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/scriptsetup.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,284 @@ +from distutils.errors import DistutilsSetupError +from setuptools import Command +import sys + +class scriptsetup(Command): + action = (sys.platform == "win32" + and "set up .pyscript association and PATHEXT variable to run scripts" + or "this does nothing on non-Windows platforms") + + user_options = [ + ('allusers', 'a', + 'make changes for all users of this Windows installation (requires Administrator privileges)'), + ] + boolean_options = ['allusers'] + + def initialize_options(self): + self.allusers = False + + def finalize_options(self): + pass + + def run(self): + if sys.platform != "win32": + print "\n'scriptsetup' isn't needed on non-Windows platforms." + else: + do_scriptsetup(self.allusers) + + +def do_scriptsetup(allusers=False): + print "\nSetting up environment to run scripts for %s..." 
% (allusers and "all users" or "the current user") + + from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_CLASSES_ROOT, \ + REG_SZ, REG_EXPAND_SZ, KEY_QUERY_VALUE, KEY_SET_VALUE, \ + OpenKey, CreateKey, QueryValueEx, SetValueEx, FlushKey, CloseKey + + USER_ENV = "Environment" + try: + user_env = OpenKey(HKEY_CURRENT_USER, USER_ENV, 0, KEY_QUERY_VALUE) + except WindowsError, e: + raise DistutilsSetupError("I could not read the user environment from the registry.\n%r" % (e,)) + + SYSTEM_ENV = "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment" + try: + system_env = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_ENV, 0, KEY_QUERY_VALUE) + except WindowsError, e: + raise DistutilsSetupError("I could not read the system environment from the registry.\n%r" % (e,)) + + + # HKEY_CLASSES_ROOT is a merged view that would only confuse us. + # + + USER_CLASSES = "SOFTWARE\\Classes" + try: + user_classes = OpenKey(HKEY_CURRENT_USER, USER_CLASSES, 0, KEY_QUERY_VALUE) + except WindowsError, e: + raise DistutilsSetupError("I could not read the user filetype associations from the registry.\n%r" % (e,)) + + SYSTEM_CLASSES = "SOFTWARE\\Classes" + try: + system_classes = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_CLASSES, 0, KEY_QUERY_VALUE) + except WindowsError, e: + raise DistutilsSetupError("I could not read the system filetype associations from the registry.\n%r" % (e,)) + + + def query(key, subkey, what): + try: + (value, type) = QueryValueEx(key, subkey) + except WindowsError, e: + if e.winerror == 2: # not found + return None + raise DistutilsSetupError("I could not read %s from the registry.\n%r" % (what, e)) + + # It does not matter that we don't expand environment strings, in fact it's better not to. + + if type != REG_SZ and type != REG_EXPAND_SZ: + raise DistutilsSetupError("I expected the registry entry for %s to have a string type (REG_SZ or REG_EXPAND_SZ), " + "and was flummoxed by it having type code %r." % (what, type)) + return (value, type) + + + def open_and_query(key, path, subkey, what): + try: + read_key = OpenKey(key, path, 0, KEY_QUERY_VALUE) + except WindowsError, e: + if e.winerror == 2: # not found + return None + raise DistutilsSetupError("I could not read %s from the registry because I could not open " + "the parent key.\n%r" % (what, e)) + + try: + return query(read_key, subkey, what) + finally: + CloseKey(read_key) + + + def update(key_name_path, subkey, desired_value, desired_type, goal, what): + (key, name, path) = key_name_path + + (old_value, old_type) = open_and_query(key, path, subkey, what) or (None, None) + if (old_value, old_type) == (desired_value, desired_type): + print "Already done: %s." 
% (goal,) + return False + + try: + update_key = OpenKey(key, path, 0, KEY_SET_VALUE|KEY_QUERY_VALUE) + except WindowsError, e: + if e.winerror != 2: + raise DistutilsSetupError("I tried to %s, but was not successful because I could not open " + "the registry key %s\\%s for writing.\n%r" + % (goal, name, path, e)) + try: + update_key = CreateKey(key, path) + except WindowsError, e: + raise DistutilsSetupError("I tried to %s, but was not successful because the registry key %s\\%s " + "did not exist, and I was unable to create it.\n%r" + % (goal, name, path, e)) + + (new_value, new_type) = (None, None) + try: + SetValueEx(update_key, subkey, 0, desired_type, desired_value) + except WindowsError, e: + raise DistutilsSetupError("I tried to %s, but was not able to set the subkey %r under %s\\%s to be %r.\n%r" + % (goal, subkey, name, path, desired_value)) + else: + (new_value, new_type) = query(update_key, subkey, what) or (None, None) + finally: + FlushKey(update_key) + CloseKey(update_key) + + if (new_value, new_type) != (desired_value, desired_type): + raise DistutilsSetupError("I tried to %s by setting the subkey %r under %s\\%s to be %r, " + "and the call to SetValueEx succeeded, but the value ended up as " + "%r instead (it was previously %r). Maybe the update was unexpectedly virtualized?" + % (goal, subkey, name, path, desired_value, new_value, old_value)) + + print "Done: %s." % (goal,) + return True + + + # Maintenance hazard: 'add_to_environment' and 'associate' use very similar, but not identical logic. + + def add_to_environment(varname, addition, change_allusers): + changed = False + what = "the %s environment variable %s" % (change_allusers and "system" or "user", varname) + goal = "add %s to %s" % (addition, what) + + system_valueandtype = query(system_env, varname, "the system environment variable %s" % (varname,)) + user_valueandtype = query(user_env, varname, "the user environment variable %s" % (varname,)) + + if change_allusers: + (value, type) = system_valueandtype or (u'', REG_SZ) + key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", SYSTEM_ENV) + else: + (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) + key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV) + + if addition.lower() in value.lower().split(u';'): + print "Already done: %s." % (goal,) + else: + changed |= update(key_name_path, varname, value + u';' + addition, type, goal, what) + + if change_allusers: + # Also change any overriding environment entry for the current user. 
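+            # (A value in HKCU\Environment overrides the machine-wide one for
+            # that user, so a stale per-user PATHEXT would otherwise mask the
+            # system-wide addition made above.)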
+ (user_value, user_type) = user_valueandtype or (u'', REG_SZ) + split_value = user_value.lower().split(u';') + + if not (addition.lower() in split_value or u'%'+varname.lower()+u'%' in split_value): + now_what = "the overriding user environment variable %s" % (varname,) + changed |= update((HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV), + varname, user_value + u';' + addition, user_type, + "add %s to %s" % (addition, now_what), now_what) + + return changed + + + def associate(ext, target, change_allusers): + changed = False + what = "the %s association for %s" % (change_allusers and "system" or "user", ext) + goal = "associate the filetype %s with %s for %s" % (ext, target, change_allusers and "all users" or "the current user") + + try: + if change_allusers: + target_key = OpenKey(HKEY_LOCAL_MACHINE, "%s\\%s" % (SYSTEM_CLASSES, target), 0, KEY_QUERY_VALUE) + else: + target_key = OpenKey(HKEY_CLASSES_ROOT, target, 0, KEY_QUERY_VALUE) + except WindowsError, e: + raise DistutilsSetupError("I was going to %s, but that won't work because the %s class does not exist in the registry, " + "as far as I can tell.\n%r" % (goal, target, e)) + CloseKey(target_key) + + system_key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", "%s\\%s" % (SYSTEM_CLASSES, ext)) + user_key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", "%s\\%s" % (USER_CLASSES, ext)) + + system_valueandtype = open_and_query(system_classes, ext, "", "the system association for %s" % (ext,)) + user_valueandtype = open_and_query(user_classes, ext, "", "the user association for %s" % (ext,)) + + if change_allusers: + (value, type) = system_valueandtype or (u'', REG_SZ) + key_name_path = system_key_name_path + else: + (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) + key_name_path = user_key_name_path + + if value == target: + print "Already done: %s." % (goal,) + else: + changed |= update(key_name_path, "", unicode(target), REG_SZ, goal, what) + + if change_allusers: + # Also change any overriding association for the current user. + (user_value, user_type) = user_valueandtype or (u'', REG_SZ) + + if user_value != target: + changed |= update(user_key_name_path, "", unicode(target), REG_SZ, + "associate the filetype %s with %s for the current user " \ + "(because the system association is overridden)" % (ext, target), + "the overriding user association for %s" % (ext,)) + + return changed + + + def broadcast_settingchange(change_allusers): + print "Broadcasting that the environment has changed, please wait..." 
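+        # Broadcasting WM_SETTINGCHANGE with the string "Environment" asks
+        # running programs (notably Explorer, which spawns new Command
+        # Prompts) to re-read environment variables from the registry.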
+ + # + # + # LRESULT WINAPI SendMessageTimeoutW(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam, + # UINT fuFlags, UINT uTimeout, PDWORD_PTR lpdwResult); + + try: + from ctypes import WINFUNCTYPE, POINTER, windll, addressof, c_wchar_p + from ctypes.wintypes import LONG, HWND, UINT, WPARAM, LPARAM, DWORD + + SendMessageTimeout = WINFUNCTYPE(POINTER(LONG), HWND, UINT, WPARAM, LPARAM, UINT, UINT, POINTER(POINTER(DWORD))) \ + (("SendMessageTimeoutW", windll.user32)) + HWND_BROADCAST = 0xFFFF + WM_SETTINGCHANGE = 0x001A + SMTO_ABORTIFHUNG = 0x0002 + SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, change_allusers and 1 or 0, + addressof(c_wchar_p(u"Environment")), SMTO_ABORTIFHUNG, 5000, None); + except Exception, e: + print "Warning: %r" % (e,) + + + changed_assoc = associate(".pyscript", "Python.File", allusers) + + changed_env = False + try: + changed_env |= add_to_environment("PATHEXT", ".pyscript", allusers) + changed_env |= add_to_environment("PATHEXT", ".pyw", allusers) + finally: + CloseKey(user_env) + CloseKey(system_env) + + if changed_assoc or changed_env: + broadcast_settingchange(allusers) + + if changed_env: + # whether logout is needed seems to randomly differ between installations + # of XP, but it is not needed in Vista or later. + try: + import platform, re + need_logout = not re.search(r'^[6-9]|([1-9][0-9]+)\.', platform.version()) + except Exception, e: + e # hush pyflakes + need_logout = True + + if need_logout: + print """ +*********************************************************************** +Changes have been made to the persistent environment, but they may not +take effect in this Windows session. Running installed Python scripts +from a Command Prompt may only work after you have logged out and back +in again, or rebooted. +*********************************************************************** +""" + else: + print """ +*********************************************************************** +Changes have been made to the persistent environment, but not in this +Command Prompt. Running installed Python scripts will only work from +new Command Prompts opened from now on. 
+***********************************************************************
+"""
diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/sdist.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/sdist.py
--- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/sdist.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/sdist.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,246 @@
+from distutils.command.sdist import sdist as _sdist
+from distutils.util import convert_path
+from distutils import log
+from glob import glob
+import os, re, sys, pkg_resources
+
+entities = [
+    ("&lt;","<"), ("&gt;", ">"), ("&quot;", '"'), ("&apos;", "'"),
+    ("&amp;", "&")
+]
+
+def unescape(data):
+    for old,new in entities:
+        data = data.replace(old,new)
+    return data
+
+def re_finder(pattern, postproc=None):
+    def find(dirname, filename):
+        f = open(filename,'rU')
+        data = f.read()
+        f.close()
+        for match in pattern.finditer(data):
+            path = match.group(1)
+            if postproc:
+                path = postproc(path)
+            yield joinpath(dirname,path)
+    return find
+
+def joinpath(prefix,suffix):
+    if not prefix:
+        return suffix
+    return os.path.join(prefix,suffix)
+
+
+
+
+
+
+
+
+
+def walk_revctrl(dirname=''):
+    """Find all files under revision control"""
+    for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+        for item in ep.load()(dirname):
+            yield item
+
+def _default_revctrl(dirname=''):
+    for path, finder in finders:
+        path = joinpath(dirname,path)
+        if os.path.isfile(path):
+            for path in finder(dirname,path):
+                if os.path.isfile(path):
+                    yield path
+        elif os.path.isdir(path):
+            for item in _default_revctrl(path):
+                yield item
+
+def externals_finder(dirname, filename):
+    """Find any 'svn:externals' directories"""
+    found = False
+    f = open(filename,'rb')
+    for line in iter(f.readline, ''):   # can't use direct iter!
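+        # (iterating the file object directly would read ahead and
+        # desynchronise the exact byte counts consumed by f.read(int(length))
+        # in the loop body below)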
+        parts = line.split()
+        if len(parts)==2:
+            kind,length = parts
+            data = f.read(int(length))
+            if kind=='K' and data=='svn:externals':
+                found = True
+            elif kind=='V' and found:
+                f.close()
+                break
+    else:
+        f.close()
+        return
+
+    for line in data.splitlines():
+        parts = line.split()
+        if parts:
+            yield joinpath(dirname, parts[0])
+
+
+entries_pattern = re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I)
+
+def entries_finder(dirname, filename):
+    f = open(filename,'rU')
+    data = f.read()
+    f.close()
+    if data.startswith('<?xml'):
+        for match in entries_pattern.finditer(data):
+            yield joinpath(dirname, unescape(match.group(1)))
+    else:
+        for record in map(str.splitlines, data.split('\n\x0c\n')):
+            if not record or len(record)>=6 and record[5]=="delete":
+                continue    # skip deleted
+            yield joinpath(dirname, record[0])
+
+
+finders = [
+    (convert_path('CVS/Entries'),
+        re_finder(re.compile(r"^\w?/([^/]+)/", re.M))),
+    (convert_path('.svn/entries'), entries_finder),
+    (convert_path('.svn/dir-props'), externals_finder),
+    (convert_path('.svn/dir-prop-base'), externals_finder),  # svn 1.4
+]
+
+
+
+
+
+
+
+
+
+
+
+
+class sdist(_sdist):
+    """Smart sdist that finds anything supported by revision control"""
+
+    user_options = [
+        ('formats=', None,
+         "formats for source distribution (comma-separated list)"),
+        ('keep-temp', 'k',
+         "keep the distribution tree around after creating " +
+         "archive file(s)"),
+        ('dist-dir=', 'd',
+         "directory to put the source distribution archive(s) in "
+         "[default: dist]"),
+    ]
+
+    negative_opt = {}
+
+    def run(self):
+        self.run_command('egg_info')
+        ei_cmd = self.get_finalized_command('egg_info')
+        self.filelist = ei_cmd.filelist
+        self.filelist.append(os.path.join(ei_cmd.egg_info,'SOURCES.txt'))
+        self.check_readme()
+        self.check_metadata()
+        self.make_distribution()
+
+        dist_files = getattr(self.distribution,'dist_files',[])
+        for file in self.archive_files:
+            data = ('sdist', '', file)
+            if data not in dist_files:
+                dist_files.append(data)
+
+    def read_template(self):
+        try:
+            _sdist.read_template(self)
+        except:
+            # grody hack to close the template file (MANIFEST.in)
+            # this prevents easy_install's attempt at deleting the file from
+            # dying and thus masking the real error
+            sys.exc_info()[2].tb_next.tb_frame.f_locals['template'].close()
+            raise
+
+    # Cribbed from old distutils code, to work around new distutils code
+    # that tries to do some of the same stuff as we do, in a way that makes
+    # us loop.
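+    # (Hence add_defaults below is a self-contained copy that never delegates
+    # to _sdist.add_defaults, which is what prevents the mutual re-entry.)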
+ + def add_defaults (self): + standards = [('README', 'README.txt'), self.distribution.script_name] + + for fn in standards: + if type(fn) is tuple: + alts = fn + got_it = 0 + for fn in alts: + if os.path.exists(fn): + got_it = 1 + self.filelist.append(fn) + break + + if not got_it: + self.warn("standard file not found: should have one of " + + ', '.join(alts)) + else: + if os.path.exists(fn): + self.filelist.append(fn) + else: + self.warn("standard file '%s' not found" % fn) + + optional = ['test/test*.py', 'setup.cfg'] + + for pattern in optional: + files = filter(os.path.isfile, glob(pattern)) + if files: + self.filelist.extend(files) + + if self.distribution.has_pure_modules(): + build_py = self.get_finalized_command('build_py') + self.filelist.extend(build_py.get_source_files()) + + if self.distribution.has_ext_modules(): + build_ext = self.get_finalized_command('build_ext') + self.filelist.extend(build_ext.get_source_files()) + + if self.distribution.has_c_libraries(): + build_clib = self.get_finalized_command('build_clib') + self.filelist.extend(build_clib.get_source_files()) + + if self.distribution.has_scripts(): + build_scripts = self.get_finalized_command('build_scripts') + self.filelist.extend(build_scripts.get_source_files()) + + + def check_readme(self): + alts = ("README", "README.txt") + for f in alts: + if os.path.exists(f): + return + else: + self.warn( + "standard file not found: should have one of " +', '.join(alts) + ) + + + def make_release_tree(self, base_dir, files): + _sdist.make_release_tree(self, base_dir, files) + + # Save any egg_info command line options used to create this sdist + dest = os.path.join(base_dir, 'setup.cfg') + if hasattr(os,'link') and os.path.exists(dest): + # unlink and re-copy, since it might be hard-linked, and + # we don't want to change the source version + os.unlink(dest) + self.copy_file('setup.cfg', dest) + + self.get_finalized_command('egg_info').save_version_info(dest) + + + + + + + + +# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/setopt.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/setopt.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/setopt.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/setopt.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,158 @@ +import distutils, os +from setuptools import Command +from distutils.util import convert_path +from distutils import log +from distutils.errors import * + +__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] + + +def config_file(kind="local"): + """Get the filename of the distutils, local, global, or per-user config + + `kind` must be one of "local", "global", or "user" + """ + if kind=='local': + return 'setup.cfg' + if kind=='global': + return os.path.join( + os.path.dirname(distutils.__file__),'distutils.cfg' + ) + if kind=='user': + dot = os.name=='posix' and '.' or '' + return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) + raise ValueError( + "config_file() type must be 'local', 'global', or 'user'", kind + ) + + + + + + + + + + + + + + + +def edit_config(filename, settings, dry_run=False): + """Edit a configuration file to include `settings` + + `settings` is a dictionary of dictionaries or ``None`` values, keyed by + command/section name. A ``None`` value means to delete the entire section, + while a dictionary lists settings to be changed or deleted in that section. + A setting of ``None`` means to delete that setting. 
+ """ + from ConfigParser import RawConfigParser + log.debug("Reading configuration from %s", filename) + opts = RawConfigParser() + opts.read([filename]) + for section, options in settings.items(): + if options is None: + log.info("Deleting section [%s] from %s", section, filename) + opts.remove_section(section) + else: + if not opts.has_section(section): + log.debug("Adding new section [%s] to %s", section, filename) + opts.add_section(section) + for option,value in options.items(): + if value is None: + log.debug("Deleting %s.%s from %s", + section, option, filename + ) + opts.remove_option(section,option) + if not opts.options(section): + log.info("Deleting empty [%s] section from %s", + section, filename) + opts.remove_section(section) + else: + log.debug( + "Setting %s.%s to %r in %s", + section, option, value, filename + ) + opts.set(section,option,value) + + log.info("Writing %s", filename) + if not dry_run: + f = open(filename,'w'); opts.write(f); f.close() + +class option_base(Command): + """Abstract base class for commands that mess with config files""" + + user_options = [ + ('global-config', 'g', + "save options to the site-wide distutils.cfg file"), + ('user-config', 'u', + "save options to the current user's pydistutils.cfg file"), + ('filename=', 'f', + "configuration file to use (default=setup.cfg)"), + ] + + boolean_options = [ + 'global-config', 'user-config', + ] + + def initialize_options(self): + self.global_config = None + self.user_config = None + self.filename = None + + def finalize_options(self): + filenames = [] + if self.global_config: + filenames.append(config_file('global')) + if self.user_config: + filenames.append(config_file('user')) + if self.filename is not None: + filenames.append(self.filename) + if not filenames: + filenames.append(config_file('local')) + if len(filenames)>1: + raise DistutilsOptionError( + "Must specify only one configuration file option", + filenames + ) + self.filename, = filenames + + + + +class setopt(option_base): + """Save command-line options to a file""" + + description = "set an option in setup.cfg or another config file" + + user_options = [ + ('command=', 'c', 'command to set an option for'), + ('option=', 'o', 'option to set'), + ('set-value=', 's', 'value of the option'), + ('remove', 'r', 'remove (unset) the value'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.command = None + self.option = None + self.set_value = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.command is None or self.option is None: + raise DistutilsOptionError("Must specify --command *and* --option") + if self.set_value is None and not self.remove: + raise DistutilsOptionError("Must specify --set-value or --remove") + + def run(self): + edit_config( + self.filename, { + self.command: {self.option.replace('-','_'):self.set_value} + }, + self.dry_run + ) diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/test.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/test.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/test.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/test.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,164 @@ +from setuptools import Command +from distutils.errors import DistutilsOptionError +import sys +from pkg_resources import * +from 
unittest import TestLoader, main + +class ScanningLoader(TestLoader): + + def loadTestsFromModule(self, module): + """Return a suite of all tests cases contained in the given module + + If the module is a package, load tests from all the modules in it. + If the module has an ``additional_tests`` function, call it and add + the return value to the tests. + """ + tests = [] + if module.__name__!='setuptools.tests.doctest': # ugh + tests.append(TestLoader.loadTestsFromModule(self,module)) + + if hasattr(module, "additional_tests"): + tests.append(module.additional_tests()) + + if hasattr(module, '__path__'): + for file in resource_listdir(module.__name__, ''): + if file.endswith('.py') and file!='__init__.py': + submodule = module.__name__+'.'+file[:-3] + else: + if resource_exists( + module.__name__, file+'/__init__.py' + ): + submodule = module.__name__+'.'+file + else: + continue + tests.append(self.loadTestsFromName(submodule)) + + if len(tests)!=1: + return self.suiteClass(tests) + else: + return tests[0] # don't create a nested suite for only one return + + +class test(Command): + """Command to run unit tests after in-place build""" + + description = "run unit tests after in-place build" + + user_options = [ + ('test-module=','m', "Run 'test_suite' in specified module"), + ('test-suite=','s', + "Test suite to run (e.g. 'some_module.test_suite')"), + ('test-runner=','r', "Test runner to use"), + ] + + def initialize_options(self): + self.test_runner = None + self.test_suite = None + self.test_module = None + self.test_loader = None + + def finalize_options(self): + if self.test_suite is None: + if self.test_module is None: + self.test_suite = self.distribution.test_suite + else: + self.test_suite = self.test_module+".test_suite" + elif self.test_module: + raise DistutilsOptionError( + "You may specify a module or a suite, but not both" + ) + + self.test_args = [self.test_suite] + + if self.verbose: + self.test_args.insert(0,'--verbose') + if self.test_loader is None: + self.test_loader = getattr(self.distribution,'test_loader',None) + if self.test_loader is None: + self.test_loader = "setuptools.command.test:ScanningLoader" + if self.test_runner is None: + self.test_runner = getattr(self.distribution,'test_runner',None) + + + def with_project_on_sys_path(self, func): + # Ensure metadata is up-to-date + self.run_command('egg_info') + + # Build extensions in-place + self.reinitialize_command('build_ext', inplace=1) + self.run_command('build_ext') + + ei_cmd = self.get_finalized_command("egg_info") + + old_path = sys.path[:] + old_modules = sys.modules.copy() + + try: + sys.path.insert(0, normalize_path(ei_cmd.egg_base)) + working_set.__init__() + add_activation_listener(lambda dist: dist.activate()) + require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) + func() + finally: + sys.path[:] = old_path + sys.modules.clear() + sys.modules.update(old_modules) + working_set.__init__() + + + def run(self): + if self.distribution.install_requires: + self.distribution.fetch_build_eggs(self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs(self.distribution.tests_require) + + if self.test_suite: + cmd = ' '.join(self.test_args) + if self.dry_run: + self.announce('skipping "unittest %s" (dry run)' % cmd) + else: + self.announce('running "unittest %s"' % cmd) + self.with_project_on_sys_path(self.run_tests) + + + def run_tests(self): + import unittest + loader_ep = EntryPoint.parse("x="+self.test_loader) + loader_class = 
loader_ep.load(require=False) + kw = {} + if self.test_runner is not None: + runner_ep = EntryPoint.parse("x="+self.test_runner) + runner_class = runner_ep.load(require=False) + kw['testRunner'] = runner_class() + unittest.main( + None, None, [unittest.__file__]+self.test_args, + testLoader = loader_class(), **kw + ) + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/upload.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/upload.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/command/upload.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/command/upload.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,181 @@ +"""distutils.command.upload + +Implements the Distutils 'upload' subcommand (upload package to PyPI).""" + +from distutils.errors import * +from distutils.core import Command +from distutils.spawn import spawn +from distutils import log +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import os +import socket +import platform +import ConfigParser +import httplib +import base64 +import urlparse +import cStringIO as StringIO + +class upload(Command): + + description = "upload binary package to PyPI" + + DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi' + + user_options = [ + ('repository=', 'r', + "url of repository [default: %s]" % DEFAULT_REPOSITORY), + ('show-response', None, + 'display full response text from server'), + ('sign', 's', + 'sign files to upload using gpg'), + ('identity=', 'i', 'GPG identity used to sign files'), + ] + boolean_options = ['show-response', 'sign'] + + def initialize_options(self): + self.username = '' + self.password = '' + self.repository = '' + self.show_response = 0 + self.sign = False + self.identity = None + + def finalize_options(self): + if self.identity and not self.sign: + raise DistutilsOptionError( + "Must use --sign for --identity to have meaning" + ) + if os.environ.has_key('HOME'): + rc = os.path.join(os.environ['HOME'], '.pypirc') + if os.path.exists(rc): + self.announce('Using PyPI login from %s' % rc) + config = ConfigParser.ConfigParser({ + 'username':'', + 'password':'', + 'repository':''}) + config.read(rc) + if not self.repository: + self.repository = config.get('server-login', 'repository') + if not self.username: + self.username = config.get('server-login', 'username') + if not self.password: + self.password = config.get('server-login', 'password') + if not self.repository: + self.repository = self.DEFAULT_REPOSITORY + + def run(self): + if not self.distribution.dist_files: + raise DistutilsOptionError("No dist file created in earlier command") + for command, pyversion, filename in self.distribution.dist_files: + self.upload_file(command, pyversion, filename) + + def upload_file(self, command, pyversion, filename): + # Sign if requested + if self.sign: + gpg_args = ["gpg", "--detach-sign", "-a", filename] + if self.identity: + gpg_args[2:2] = ["--local-user", self.identity] + spawn(gpg_args, + dry_run=self.dry_run) + + # Fill in the data + content = open(filename,'rb').read() + basename = os.path.basename(filename) + comment = '' + if command=='bdist_egg' and self.distribution.has_ext_modules(): + comment = "built on %s" % platform.platform(terse=1) + data = { + ':action':'file_upload', + 'protcol_version':'1', + 'name':self.distribution.get_name(), + 'version':self.distribution.get_version(), + 'content':(basename,content), + 'filetype':command, + 
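+            # ('md5_digest' lets the server detect a corrupted upload by
+            # re-hashing the submitted 'content')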
'pyversion':pyversion, + 'md5_digest':md5(content).hexdigest(), + } + if command == 'bdist_rpm': + dist, version, id = platform.dist() + if dist: + comment = 'built for %s %s' % (dist, version) + elif command == 'bdist_dumb': + comment = 'built for %s' % platform.platform(terse=1) + data['comment'] = comment + + if self.sign: + data['gpg_signature'] = (os.path.basename(filename) + ".asc", + open(filename+".asc").read()) + + # set up the authentication + auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip() + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = '\n--' + boundary + end_boundary = sep_boundary + '--' + body = StringIO.StringIO() + for key, value in data.items(): + # handle multiple entries for the same name + if type(value) != type([]): + value = [value] + for value in value: + if type(value) is tuple: + fn = ';filename="%s"' % value[0] + value = value[1] + else: + fn = "" + value = str(value) + body.write(sep_boundary) + body.write('\nContent-Disposition: form-data; name="%s"'%key) + body.write(fn) + body.write("\n\n") + body.write(value) + if value and value[-1] == '\r': + body.write('\n') # write an extra newline (lurve Macs) + body.write(end_boundary) + body.write("\n") + body = body.getvalue() + + self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) + + # build the Request + # We can't use urllib2 since we need to send the Basic + # auth right with the first request + schema, netloc, url, params, query, fragments = \ + urlparse.urlparse(self.repository) + assert not params and not query and not fragments + if schema == 'http': + http = httplib.HTTPConnection(netloc) + elif schema == 'https': + http = httplib.HTTPSConnection(netloc) + else: + raise AssertionError, "unsupported schema "+schema + + data = '' + loglevel = log.INFO + try: + http.connect() + http.putrequest("POST", url) + http.putheader('Content-type', + 'multipart/form-data; boundary=%s'%boundary) + http.putheader('Content-length', str(len(body))) + http.putheader('Authorization', auth) + http.endheaders() + http.send(body) + except socket.error, e: + self.announce(str(e), log.ERROR) + return + + r = http.getresponse() + if r.status == 200: + self.announce('Server response (%s): %s' % (r.status, r.reason), + log.INFO) + else: + self.announce('Upload failed (%s): %s' % (r.status, r.reason), + log.ERROR) + if self.show_response: + print '-'*75, r.read(), '-'*75 diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/depends.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/depends.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/depends.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/depends.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,246 @@ +from __future__ import generators +import sys, imp, marshal +from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN +from distutils.version import StrictVersion, LooseVersion + +__all__ = [ + 'Require', 'find_module', 'get_module_constant', 'extract_constant' +] + +class Require: + """A prerequisite to building or installing a distribution""" + + def __init__(self,name,requested_version,module,homepage='', + attribute=None,format=None + ): + + if format is None and requested_version is not None: + format = StrictVersion + + if format is not None: + requested_version = format(requested_version) + if attribute is None: + attribute = '__version__' + + 
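+        # Absorb every constructor argument (name, requested_version, module,
+        # homepage, attribute, format) as instance attributes in one shot;
+        # locals() also contains 'self' itself, which is deleted just below.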
        self.__dict__.update(locals())
+        del self.self
+
+
+    def full_name(self):
+        """Return full package/distribution name, w/version"""
+        if self.requested_version is not None:
+            return '%s-%s' % (self.name,self.requested_version)
+        return self.name
+
+
+    def version_ok(self,version):
+        """Is 'version' sufficiently up-to-date?"""
+        return self.attribute is None or self.format is None or \
+            str(version)!="unknown" and version >= self.requested_version
+
+
+    def get_version(self, paths=None, default="unknown"):
+
+        """Get version number of installed module, 'None', or 'default'
+
+        Search 'paths' for module.  If not found, return 'None'.  If found,
+        return the extracted version attribute, or 'default' if no version
+        attribute was specified, or the value cannot be determined without
+        importing the module.  The version is formatted according to the
+        requirement's version format (if any), unless it is 'None' or the
+        supplied 'default'.
+        """
+
+        if self.attribute is None:
+            try:
+                f,p,i = find_module(self.module,paths)
+                if f: f.close()
+                return default
+            except ImportError:
+                return None
+
+        v = get_module_constant(self.module,self.attribute,default,paths)
+
+        if v is not None and v is not default and self.format is not None:
+            return self.format(v)
+
+        return v
+
+
+    def is_present(self,paths=None):
+        """Return true if dependency is present on 'paths'"""
+        return self.get_version(paths) is not None
+
+
+    def is_current(self,paths=None):
+        """Return true if dependency is present and up-to-date on 'paths'"""
+        version = self.get_version(paths)
+        if version is None:
+            return False
+        return self.version_ok(version)
+
+
+def _iter_code(code):
+
+    """Yield '(op,arg)' pair for each operation in code object 'code'"""
+
+    from array import array
+    from dis import HAVE_ARGUMENT, EXTENDED_ARG
+
+    bytes = array('b',code.co_code)
+    eof = len(code.co_code)
+
+    ptr = 0
+    extended_arg = 0
+
+    while ptr<eof:
+
+        op = bytes[ptr]
+
+        if op>=HAVE_ARGUMENT:
+
+            arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
+            ptr += 3
+
+            if op==EXTENDED_ARG:
+                extended_arg = arg * 65536L
+                continue
+
+        else:
+            arg = None
+            ptr += 1
+
+        yield op,arg
+
+
+
+
+
+
+
+
+
+
+def find_module(module, paths=None):
+    """Just like 'imp.find_module()', but with package support"""
+
+    parts = module.split('.')
+
+    while parts:
+        part = parts.pop(0)
+        f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
+
+        if kind==PKG_DIRECTORY:
+            parts = parts or ['__init__']
+            paths = [path]
+
+        elif parts:
+            raise ImportError("Can't find %r in %s" % (parts,module))
+
+    return info
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+
+    """Find 'module' by searching 'paths', and extract 'symbol'
+
+    Return 'None' if 'module' does not exist on 'paths', or it does not define
+    'symbol'.  If the module defines 'symbol' as a constant, return the
+    constant.  Otherwise, return 'default'."""
+
+    try:
+        f, path, (suffix,mode,kind) = find_module(module,paths)
+    except ImportError:
+        # Module doesn't exist
+        return None
+
+    try:
+        if kind==PY_COMPILED:
+            f.read(8)   # skip magic & date
+            code = marshal.load(f)
+        elif kind==PY_FROZEN:
+            code = imp.get_frozen_object(module)
+        elif kind==PY_SOURCE:
+            code = compile(f.read(), path, 'exec')
+        else:
+            # Not something we can parse; we'll have to import it.
:( + if module not in sys.modules: + imp.load_module(module,f,path,(suffix,mode,kind)) + return getattr(sys.modules[module],symbol,None) + + finally: + if f: + f.close() + + return extract_constant(code,symbol,default) + + + + + + + + +def extract_constant(code,symbol,default=-1): + """Extract the constant value of 'symbol' from 'code' + + If the name 'symbol' is bound to a constant value by the Python code + object 'code', return that value. If 'symbol' is bound to an expression, + return 'default'. Otherwise, return 'None'. + + Return value is based on the first assignment to 'symbol'. 'symbol' must + be a global, or at least a non-"fast" local in the code block. That is, + only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' + must be present in 'code.co_names'. + """ + + if symbol not in code.co_names: + # name's not there, can't possibly be an assigment + return None + + name_idx = list(code.co_names).index(symbol) + + STORE_NAME = 90 + STORE_GLOBAL = 97 + LOAD_CONST = 100 + + const = default + + for op, arg in _iter_code(code): + + if op==LOAD_CONST: + const = code.co_consts[arg] + elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL): + return const + else: + const = default + +if sys.platform.startswith('java') or sys.platform == 'cli': + # XXX it'd be better to test assertions about bytecode instead... + del extract_constant, get_module_constant + __all__.remove('extract_constant') + __all__.remove('get_module_constant') + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/dist.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/dist.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/dist.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/dist.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,861 @@ +__all__ = ['Distribution'] + +from distutils.core import Distribution as _Distribution +from setuptools.depends import Require +from setuptools.command.install import install +from setuptools.command.sdist import sdist +from setuptools.command.install_lib import install_lib +from distutils.errors import DistutilsOptionError, DistutilsPlatformError +from distutils.errors import DistutilsSetupError +import setuptools, pkg_resources, distutils.core, distutils.dist, distutils.cmd +import os, distutils.log, re + +def _get_unpatched(cls): + """Protect against re-patching the distutils if reloaded + + Also ensures that no other distutils extension monkeypatched the distutils + first. 
+ """ + while cls.__module__.startswith('setuptools'): + cls, = cls.__bases__ + if not cls.__module__.startswith('distutils'): + raise AssertionError( + "distutils has already been patched by %r" % cls + ) + return cls + +_Distribution = _get_unpatched(_Distribution) + +sequence = tuple, list + +def check_importable(dist, attr, value): + try: + ep = pkg_resources.EntryPoint.parse('x='+value) + assert not ep.extras + except (TypeError,ValueError,AttributeError,AssertionError): + raise DistutilsSetupError( + "%r must be importable 'module:attrs' string (got %r)" + % (attr,value) + ) + + +def assert_string_list(dist, attr, value): + """Verify that value is a string list or None""" + try: + assert ''.join(value)!=value + except (TypeError,ValueError,AttributeError,AssertionError): + raise DistutilsSetupError( + "%r must be a list of strings (got %r)" % (attr,value) + ) + +def check_nsp(dist, attr, value): + """Verify that namespace packages are valid""" + assert_string_list(dist,attr,value) + for nsp in value: + if not dist.has_contents_for(nsp): + raise DistutilsSetupError( + "Distribution contains no modules or packages for " + + "namespace package %r" % nsp + ) + if '.' in nsp: + parent = '.'.join(nsp.split('.')[:-1]) + if parent not in value: + distutils.log.warn( + "WARNING: %r is declared as a package namespace, but %r" + " is not: please correct this in setup.py", nsp, parent + ) + +def check_extras(dist, attr, value): + """Verify that extras_require mapping is valid""" + try: + for k,v in value.items(): + list(pkg_resources.parse_requirements(v)) + except (TypeError,ValueError,AttributeError): + raise DistutilsSetupError( + "'extras_require' must be a dictionary whose values are " + "strings or lists of strings containing valid project/version " + "requirement specifiers." 
+ ) + + + + +def assert_bool(dist, attr, value): + """Verify that value is True, False, 0, or 1""" + if bool(value) != value: + raise DistutilsSetupError( + "%r must be a boolean value (got %r)" % (attr,value) + ) +def check_requirements(dist, attr, value): + """Verify that install_requires is a valid requirements list""" + try: + list(pkg_resources.parse_requirements(value)) + except (TypeError,ValueError): + raise DistutilsSetupError( + "%r must be a string or list of strings " + "containing valid project/version requirement specifiers" % (attr,) + ) +def check_entry_points(dist, attr, value): + """Verify that entry_points map is parseable""" + try: + pkg_resources.EntryPoint.parse_map(value) + except ValueError, e: + raise DistutilsSetupError(e) + +def check_test_suite(dist, attr, value): + if not isinstance(value,basestring): + raise DistutilsSetupError("test_suite must be a string") + +def check_package_data(dist, attr, value): + """Verify that value is a dictionary of package names to glob lists""" + if isinstance(value,dict): + for k,v in value.items(): + if not isinstance(k,str): break + try: iter(v) + except TypeError: + break + else: + return + raise DistutilsSetupError( + attr+" must be a dictionary mapping package names to lists of " + "wildcard patterns" + ) + +def check_packages(dist, attr, value): + for pkgname in value: + if not re.match(r'\w+(\.\w+)*', pkgname): + distutils.log.warn( + "WARNING: %r not a valid package name; please use only" + ".-separated package names in setup.py", pkgname + ) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +class Distribution(_Distribution): + """Distribution with support for features, tests, and package data + + This is an enhanced version of 'distutils.dist.Distribution' that + effectively adds the following new optional keyword arguments to 'setup()': + + 'install_requires' -- a string or sequence of strings specifying project + versions that the distribution requires when installed, in the format + used by 'pkg_resources.require()'. They will be installed + automatically when the package is installed. If you wish to use + packages that are not available in PyPI, or want to give your users an + alternate download location, you can add a 'find_links' option to the + '[easy_install]' section of your project's 'setup.cfg' file, and then + setuptools will scan the listed web pages for links that satisfy the + requirements. + + 'extras_require' -- a dictionary mapping names of optional "extras" to the + additional requirement(s) that using those extras incurs. For example, + this:: + + extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) + + indicates that the distribution can optionally provide an extra + capability called "reST", but it can only be used if docutils and + reSTedit are installed. If the user installs your package using + EasyInstall and requests one of your extras, the corresponding + additional requirements will be installed if needed. + + 'features' -- a dictionary mapping option names to 'setuptools.Feature' + objects. Features are a portion of the distribution that can be + included or excluded based on user options, inter-feature dependencies, + and availability on the current system. Excluded features are omitted + from all setup commands, including source and binary distributions, so + you can create multiple distributions from the same source tree. + Feature names should be valid Python identifiers, except that they may + contain the '-' (minus) sign. 
Features can be included or excluded + via the command line options '--with-X' and '--without-X', where 'X' is + the name of the feature. Whether a feature is included by default, and + whether you are allowed to control this from the command line, is + determined by the Feature object. See the 'Feature' class for more + information. + + 'test_suite' -- the name of a test suite to run for the 'test' command. + If the user runs 'python setup.py test', the package will be installed, + and the named test suite will be run. The format is the same as + would be used on a 'unittest.py' command line. That is, it is the + dotted name of an object to import and call to generate a test suite. + + 'package_data' -- a dictionary mapping package names to lists of filenames + or globs to use to find data files contained in the named packages. + If the dictionary has filenames or globs listed under '""' (the empty + string), those names will be searched for in every package, in addition + to any names for the specific package. Data files found using these + names/globs will be installed along with the package, in the same + location as the package. Note that globs are allowed to reference + the contents of non-package subdirectories, as long as you use '/' as + a path separator. (Globs are automatically converted to + platform-specific paths at runtime.) + + In addition to these new keywords, this class also has several new methods + for manipulating the distribution's contents. For example, the 'include()' + and 'exclude()' methods can be thought of as in-place add and subtract + commands that add or remove packages, modules, extensions, and so on from + the distribution. They are used by the feature subsystem to configure the + distribution for the included and excluded features. + """ + + _patched_dist = None + + def patch_missing_pkg_info(self, attrs): + # Fake up a replacement for the data that would normally come from + # PKG-INFO, but which might not yet be built if this is a fresh + # checkout. 
+ # + if not attrs or 'name' not in attrs or 'version' not in attrs: + return + key = pkg_resources.safe_name(str(attrs['name'])).lower() + dist = pkg_resources.working_set.by_key.get(key) + if dist is not None and not dist.has_metadata('PKG-INFO'): + dist._version = pkg_resources.safe_version(str(attrs['version'])) + self._patched_dist = dist + + def __init__ (self, attrs=None): + have_package_data = hasattr(self, "package_data") + if not have_package_data: + self.package_data = {} + self.require_features = [] + self.features = {} + self.dist_files = [] + self.patch_missing_pkg_info(attrs) + # Make sure we have any eggs needed to interpret 'attrs' + if attrs is not None: + self.dependency_links = attrs.pop('dependency_links', []) + assert_string_list(self,'dependency_links',self.dependency_links) + if attrs and 'setup_requires' in attrs: + self.fetch_build_eggs(attrs.pop('setup_requires')) + for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): + if not hasattr(self,ep.name): + setattr(self,ep.name,None) + _Distribution.__init__(self,attrs) + if isinstance(self.metadata.version, (int,long,float)): + # Some people apparently take "version number" too literally :) + self.metadata.version = str(self.metadata.version) + + def parse_command_line(self): + """Process features after parsing command line options""" + result = _Distribution.parse_command_line(self) + if self.features: + self._finalize_features() + return result + + def _feature_attrname(self,name): + """Convert feature name to corresponding option attribute name""" + return 'with_'+name.replace('-','_') + + def fetch_build_eggs(self, requires): + """Resolve pre-setup requirements""" + from pkg_resources import working_set, parse_requirements + for dist in working_set.resolve( + parse_requirements(requires), installer=self.fetch_build_egg + ): + working_set.add(dist) + + def finalize_options(self): + _Distribution.finalize_options(self) + if self.features: + self._set_global_opts_from_features() + + for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): + value = getattr(self,ep.name,None) + if value is not None: + ep.require(installer=self.fetch_build_egg) + ep.load()(self, ep.name, value) + + def fetch_build_egg(self, req): + """Fetch an egg needed for building""" + try: + cmd = self._egg_fetcher + except AttributeError: + from setuptools.command.easy_install import easy_install + dist = self.__class__({'script_args':['easy_install']}) + dist.parse_config_files() + opts = dist.get_option_dict('easy_install') + keep = ( + 'find_links', 'site_dirs', 'index_url', 'optimize', + 'site_dirs', 'allow_hosts' + ) + for key in opts.keys(): + if key not in keep: + del opts[key] # don't use any other settings + if self.dependency_links: + links = self.dependency_links[:] + if 'find_links' in opts: + links = opts['find_links'][1].split() + links + opts['find_links'] = ('setup', links) + cmd = easy_install( + dist, args=["x"], install_dir=os.curdir, exclude_scripts=True, + always_copy=False, build_directory=None, editable=False, + upgrade=False, multi_version=True, no_report = True + ) + cmd.ensure_finalized() + self._egg_fetcher = cmd + return cmd.easy_install(req) + + def _set_global_opts_from_features(self): + """Add --with-X/--without-X options based on optional features""" + + go = [] + no = self.negative_opt.copy() + + for name,feature in self.features.items(): + self._set_feature(name,None) + feature.validate(self) + + if feature.optional: + descr = feature.description + incdef = ' (default)' + excdef='' 
+ if not feature.include_by_default(): + excdef, incdef = incdef, excdef + + go.append(('with-'+name, None, 'include '+descr+incdef)) + go.append(('without-'+name, None, 'exclude '+descr+excdef)) + no['without-'+name] = 'with-'+name + + self.global_options = self.feature_options = go + self.global_options + self.negative_opt = self.feature_negopt = no + + + + + + + + + + + + + + + + + + + def _finalize_features(self): + """Add/remove features and resolve dependencies between them""" + + # First, flag all the enabled items (and thus their dependencies) + for name,feature in self.features.items(): + enabled = self.feature_is_included(name) + if enabled or (enabled is None and feature.include_by_default()): + feature.include_in(self) + self._set_feature(name,1) + + # Then disable the rest, so that off-by-default features don't + # get flagged as errors when they're required by an enabled feature + for name,feature in self.features.items(): + if not self.feature_is_included(name): + feature.exclude_from(self) + self._set_feature(name,0) + + + def get_command_class(self, command): + """Pluggable version of get_command_class()""" + if command in self.cmdclass: + return self.cmdclass[command] + + for ep in pkg_resources.iter_entry_points('distutils.commands',command): + ep.require(installer=self.fetch_build_egg) + self.cmdclass[command] = cmdclass = ep.load() + return cmdclass + else: + return _Distribution.get_command_class(self, command) + + def print_commands(self): + for ep in pkg_resources.iter_entry_points('distutils.commands'): + if ep.name not in self.cmdclass: + try: + cmdclass = ep.load(False) # don't require extras, we're not running + self.cmdclass[ep.name] = cmdclass + except ImportError: + pass # see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1405 + return _Distribution.print_commands(self) + + + def _set_feature(self,name,status): + """Set feature's inclusion status""" + setattr(self,self._feature_attrname(name),status) + + def feature_is_included(self,name): + """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" + return getattr(self,self._feature_attrname(name)) + + def include_feature(self,name): + """Request inclusion of feature named 'name'""" + + if self.feature_is_included(name)==0: + descr = self.features[name].description + raise DistutilsOptionError( + descr + " is required, but was excluded or is not available" + ) + self.features[name].include_in(self) + self._set_feature(name,1) + + def include(self,**attrs): + """Add items to distribution that are named in keyword arguments + + For example, 'dist.exclude(py_modules=["x"])' would add 'x' to + the distribution's 'py_modules' attribute, if it was not already + there. + + Currently, this method only supports inclusion for attributes that are + lists or tuples. If you need to add support for adding to other + attributes in this or a subclass, you can add an '_include_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' + will try to call 'dist._include_foo({"bar":"baz"})', which can then + handle whatever special inclusion logic is needed. + """ + for k,v in attrs.items(): + include = getattr(self, '_include_'+k, None) + if include: + include(v) + else: + self._include_misc(k,v) + + def exclude_package(self,package): + """Remove packages, modules, and extensions in named package""" + + pfx = package+'.' 
+ if self.packages: + self.packages = [ + p for p in self.packages + if p!=package and not p.startswith(pfx) + ] + + if self.py_modules: + self.py_modules = [ + p for p in self.py_modules + if p!=package and not p.startswith(pfx) + ] + + if self.ext_modules: + self.ext_modules = [ + p for p in self.ext_modules + if p.name!=package and not p.name.startswith(pfx) + ] + + + def has_contents_for(self,package): + """Return true if 'exclude_package(package)' would do something""" + + pfx = package+'.' + + for p in self.iter_distribution_names(): + if p==package or p.startswith(pfx): + return True + + + + + + + + + + + def _exclude_misc(self,name,value): + """Handle 'exclude()' for list/tuple attrs without a special handler""" + if not isinstance(value,sequence): + raise DistutilsSetupError( + "%s: setting must be a list or tuple (%r)" % (name, value) + ) + try: + old = getattr(self,name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is not None and not isinstance(old,sequence): + raise DistutilsSetupError( + name+": this setting cannot be changed via include/exclude" + ) + elif old: + setattr(self,name,[item for item in old if item not in value]) + + def _include_misc(self,name,value): + """Handle 'include()' for list/tuple attrs without a special handler""" + + if not isinstance(value,sequence): + raise DistutilsSetupError( + "%s: setting must be a list (%r)" % (name, value) + ) + try: + old = getattr(self,name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is None: + setattr(self,name,value) + elif not isinstance(old,sequence): + raise DistutilsSetupError( + name+": this setting cannot be changed via include/exclude" + ) + else: + setattr(self,name,old+[item for item in value if item not in old]) + + def exclude(self,**attrs): + """Remove items from distribution that are named in keyword arguments + + For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from + the distribution's 'py_modules' attribute. Excluding packages uses + the 'exclude_package()' method, so all of the package's contained + packages, modules, and extensions are also excluded. + + Currently, this method only supports exclusion from attributes that are + lists or tuples. If you need to add support for excluding from other + attributes in this or a subclass, you can add an '_exclude_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' + will try to call 'dist._exclude_foo({"bar":"baz"})', which can then + handle whatever special exclusion logic is needed. + """ + for k,v in attrs.items(): + exclude = getattr(self, '_exclude_'+k, None) + if exclude: + exclude(v) + else: + self._exclude_misc(k,v) + + def _exclude_packages(self,packages): + if not isinstance(packages,sequence): + raise DistutilsSetupError( + "packages: setting must be a list or tuple (%r)" % (packages,) + ) + map(self.exclude_package, packages) + + + + + + + + + + + + + def _parse_command_opts(self, parser, args): + # Remove --with-X/--without-X options when processing command args + self.global_options = self.__class__.global_options + self.negative_opt = self.__class__.negative_opt + + # First, expand any aliases + command = args[0] + aliases = self.get_option_dict('aliases') + while command in aliases: + src,alias = aliases[command] + del aliases[command] # ensure each alias can expand only once! 
+ import shlex + args[:1] = shlex.split(alias,True) + command = args[0] + + nargs = _Distribution._parse_command_opts(self, parser, args) + + # Handle commands that want to consume all remaining arguments + cmd_class = self.get_command_class(command) + if getattr(cmd_class,'command_consumes_arguments',None): + self.get_option_dict(command)['args'] = ("command line", nargs) + if nargs is not None: + return [] + + return nargs + + + + + + + + + + + + + + + + + def get_cmdline_options(self): + """Return a '{cmd: {opt:val}}' map of all command-line options + + Option names are all long, but do not include the leading '--', and + contain dashes rather than underscores. If the option doesn't take + an argument (e.g. '--quiet'), the 'val' is 'None'. + + Note that options provided by config files are intentionally excluded. + """ + + d = {} + + for cmd,opts in self.command_options.items(): + + for opt,(src,val) in opts.items(): + + if src != "command line": + continue + + opt = opt.replace('_','-') + + if val==0: + cmdobj = self.get_command_obj(cmd) + neg_opt = self.negative_opt.copy() + neg_opt.update(getattr(cmdobj,'negative_opt',{})) + for neg,pos in neg_opt.items(): + if pos==opt: + opt=neg + val=None + break + else: + raise AssertionError("Shouldn't be able to get here") + + elif val==1: + val = None + + d.setdefault(cmd,{})[opt] = val + + return d + + + def iter_distribution_names(self): + """Yield all packages, modules, and extension names in distribution""" + + for pkg in self.packages or (): + yield pkg + + for module in self.py_modules or (): + yield module + + for ext in self.ext_modules or (): + if isinstance(ext,tuple): + name, buildinfo = ext + else: + name = ext.name + if name.endswith('module'): + name = name[:-6] + yield name + +# Install it throughout the distutils +for module in distutils.dist, distutils.core, distutils.cmd: + module.Distribution = Distribution + + + + + + + + + + + + + + + + + + + + +class Feature: + """A subset of the distribution that can be excluded if unneeded/wanted + + Features are created using these keyword arguments: + + 'description' -- a short, human readable description of the feature, to + be used in error messages, and option help messages. + + 'standard' -- if true, the feature is included by default if it is + available on the current system. Otherwise, the feature is only + included if requested via a command line '--with-X' option, or if + another included feature requires it. The default setting is 'False'. + + 'available' -- if true, the feature is available for installation on the + current system. The default setting is 'True'. + + 'optional' -- if true, the feature's inclusion can be controlled from the + command line, using the '--with-X' or '--without-X' options. If + false, the feature's inclusion status is determined automatically, + based on 'availabile', 'standard', and whether any other feature + requires it. The default setting is 'True'. + + 'require_features' -- a string or sequence of strings naming features + that should also be included if this feature is included. Defaults to + empty list. May also contain 'Require' objects that should be + added/removed from the distribution. + + 'remove' -- a string or list of strings naming packages to be removed + from the distribution if this feature is *not* included. If the + feature *is* included, this argument is ignored. 
This argument exists
+        to support removing features that "crosscut" a distribution, such as
+        defining a 'tests' feature that removes all the 'tests' subpackages
+        provided by other features.  The default for this argument is an empty
+        list.  (Note: the named package(s) or modules must exist in the base
+        distribution when the 'setup()' function is initially called.)
+
+      other keywords -- any other keyword arguments are saved, and passed to
+        the distribution's 'include()' and 'exclude()' methods when the
+        feature is included or excluded, respectively.  So, for example, you
+        could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
+        added or removed from the distribution as appropriate.
+
+    A feature must include at least one 'requires', 'remove', or other
+    keyword argument.  Otherwise, it can't affect the distribution in any way.
+    Note also that you can subclass 'Feature' to create your own specialized
+    feature types that modify the distribution in other ways when included or
+    excluded.  See the docstrings for the various methods here for more detail.
+    Aside from the methods, the only feature attributes that distributions look
+    at are 'description' and 'optional'.
+    """
+    def __init__(self, description, standard=False, available=True,
+        optional=True, require_features=(), remove=(), **extras
+    ):
+
+        self.description = description
+        self.standard = standard
+        self.available = available
+        self.optional = optional
+        if isinstance(require_features,(str,Require)):
+            require_features = require_features,
+
+        self.require_features = [
+            r for r in require_features if isinstance(r,str)
+        ]
+        er = [r for r in require_features if not isinstance(r,str)]
+        if er: extras['require_features'] = er
+
+        if isinstance(remove,str):
+            remove = remove,
+        self.remove = remove
+        self.extras = extras
+
+        if not remove and not require_features and not extras:
+            raise DistutilsSetupError(
+                "Feature %s: must define 'require_features', 'remove', or at"
+                " least one of 'packages', 'py_modules', etc." % self.description
+            )
+
+    def include_by_default(self):
+        """Should this feature be included by default?"""
+        return self.available and self.standard
+
+    def include_in(self,dist):
+
+        """Ensure feature and its requirements are included in distribution
+
+        You may override this in a subclass to perform additional operations on
+        the distribution.  Note that this method may be called more than once
+        per feature, and so should be idempotent.
+
+        """
+
+        if not self.available:
+            raise DistutilsPlatformError(
+                self.description + " is required, "
+                "but is not available on this platform"
+            )
+
+        dist.include(**self.extras)
+
+        for f in self.require_features:
+            dist.include_feature(f)
+
+
+
+    def exclude_from(self,dist):
+
+        """Ensure feature is excluded from distribution
+
+        You may override this in a subclass to perform additional operations on
+        the distribution.  This method will be called at most once per
+        feature, and only after all included features have been asked to
+        include themselves.
+        """
+
+        dist.exclude(**self.extras)
+
+        if self.remove:
+            for item in self.remove:
+                dist.exclude_package(item)
+
+
+
+    def validate(self,dist):
+
+        """Verify that feature makes sense in context of distribution
+
+        This method is called by the distribution just before it parses its
+        command line.  It checks to ensure that the 'remove' attribute, if any,
+        contains only valid package/module names that are present in the base
+        distribution when 'setup()' is called.
You may override it in a
+        subclass to perform any other required validation of the feature
+        against a target distribution.
+        """
+
+        for item in self.remove:
+            if not dist.has_contents_for(item):
+                raise DistutilsSetupError(
+                    "%s wants to be able to remove %s, but the distribution"
+                    " doesn't contain any packages or modules under %s"
+                    % (self.description, item, item)
+                )
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/extension.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/extension.py
--- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/extension.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/extension.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,35 @@
+from distutils.core import Extension as _Extension
+from dist import _get_unpatched
+_Extension = _get_unpatched(_Extension)
+
+try:
+    from Pyrex.Distutils.build_ext import build_ext
+except ImportError:
+    have_pyrex = False
+else:
+    have_pyrex = True
+
+
+class Extension(_Extension):
+    """Extension that uses '.c' files in place of '.pyx' files"""
+
+    if not have_pyrex:
+        # convert .pyx extensions to .c
+        def __init__(self,*args,**kw):
+            _Extension.__init__(self,*args,**kw)
+            sources = []
+            for s in self.sources:
+                if s.endswith('.pyx'):
+                    sources.append(s[:-3]+'c')
+                else:
+                    sources.append(s)
+            self.sources = sources
+
+class Library(Extension):
+    """Just like a regular Extension, but built as a library instead"""
+
+import sys, distutils.core, distutils.extension
+distutils.core.Extension = Extension
+distutils.extension.Extension = Extension
+if 'distutils.command.build_ext' in sys.modules:
+    sys.modules['distutils.command.build_ext'].Extension = Extension
diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/package_index.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/package_index.py
--- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/package_index.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/package_index.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,798 @@
+"""PyPI and direct package downloading"""
+import sys, os.path, re, urlparse, urllib2, shutil, random, socket, cStringIO
+import httplib, urllib
+from pkg_resources import *
+from distutils import log
+from distutils.errors import DistutilsError
+try:
+    from hashlib import md5
+except ImportError:
+    from md5 import md5
+from fnmatch import translate
+EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
+HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
+# this is here to fix emacs' cruddy broken syntax highlighting
+PYPI_MD5 = re.compile(
+    '<a href="([^"#]+)#md5=([0-9a-f]{32})">([^<]+)</a>\n\s+\\(md5\\)'
+)
+URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+def is_local(url_or_fname):
+    """ Return True if url_or_fname is a "file:" url or if it is a schemaless thing (which is presumably a filename). """
+    mo = URL_SCHEME(url_or_fname)
+    return not (mo and mo.group(1).lower()!='file')
+
+def url_or_fname_to_fname(url_or_fname):
+    """ Assert that is_local(url_or_fname) then if it is a "file:" url, parse it and run url2pathname on it, else just return it. """
+    assert is_local(url_or_fname)
+
+    mo = URL_SCHEME(url_or_fname)
+    if mo:
+        return urllib2.url2pathname(urlparse.urlparse(url_or_fname)[2])
+    else:
+        return url_or_fname
+
+__all__ = [
+    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+    'interpret_distro_name',
+]
+
+def parse_bdist_wininst(name):
+    """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+    lower = name.lower()
+    base, py_ver = None, None
+
+    if lower.endswith('.exe'):
+        if lower.endswith('.win32.exe'):
+            base = name[:-10]
+        elif lower.startswith('.win32-py',-16):
+            py_ver = name[-7:-4]
+            base = name[:-16]
+
+    return base,py_ver
+
+def egg_info_for_url(url):
+    scheme, server, path, parameters, query, fragment = urlparse.urlparse(url)
+    base = urllib2.unquote(path.split('/')[-1])
+    if server=='sourceforge.net' and base=='download':    # XXX Yuck
+        base = urllib2.unquote(path.split('/')[-2])
+    if '#' in base: base, fragment = base.split('#',1)
+    return base,fragment
+
+def distros_for_url(url, metadata=None):
+    """Yield egg or source distribution objects that might be found at a URL"""
+    base, fragment = egg_info_for_url(url)
+    for dist in distros_for_location(url, base, metadata): yield dist
+    if fragment:
+        match = EGG_FRAGMENT.match(fragment)
+        if match:
+            for dist in interpret_distro_name(
+                url, match.group(1), metadata, precedence = CHECKOUT_DIST
+            ):
+                yield dist
+
+def distros_for_location(location, basename, metadata=None):
+    """Yield egg or source distribution objects based on basename"""
+    if basename.endswith('.egg.zip'):
+        basename = basename[:-4]    # strip the .zip
+    if basename.endswith('.egg') and '-' in basename:
+        # only one, unambiguous interpretation
+        return [Distribution.from_location(location, basename, metadata)]
+    if basename.endswith('.exe'):
+        win_base, py_ver = parse_bdist_wininst(basename)
+        if win_base is not None:
+            return interpret_distro_name(
+                location, win_base, metadata, py_ver, BINARY_DIST, "win32"
+            )
+    # Try source distro extensions (.zip, .tgz, etc.)
+    #
+    for ext in EXTENSIONS:
+        if basename.endswith(ext):
+            basename = basename[:-len(ext)]
+            return interpret_distro_name(location, basename, metadata)
+    return []  # no extension matched
+
+def distros_for_filename(filename, metadata=None):
+    """Yield possible egg or source distribution objects based on a filename"""
+    return distros_for_location(
+        normalize_path(filename), os.path.basename(filename), metadata
+    )
+
+
+def interpret_distro_name(location, basename, metadata,
+    py_version=None, precedence=SOURCE_DIST, platform=None
+):
+    """Generate alternative interpretations of a source distro name
+
+    Note: if `location` is a filesystem filename, you should call
+    ``pkg_resources.normalize_path()`` on it before passing it to this
+    routine!
+    """
+    # Generate alternative interpretations of a source distro name
+    # Because some packages are ambiguous as to name/versions split
+    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
+    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
+    # the spurious interpretations should be ignored, because in the event
+    # there's also an "adns" package, the spurious "python-1.1.0" version will
+    # compare lower than any numeric version number, and is therefore unlikely
+    # to match a request for it. It's still a potential problem, though, and
+    # in the long run PyPI and the distutils should go for "safe" names and
+    # versions in distribution archive names (sdist and bdist).
+ + parts = basename.split('-') + if not py_version: + for i,p in enumerate(parts[2:]): + if len(p)==5 and p.startswith('py2.'): + return # It's a bdist_dumb, not an sdist -- bail out + + for p in range(1,len(parts)+1): + yield Distribution( + location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), + py_version=py_version, precedence = precedence, + platform = platform + ) + +REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) +# this line is here to fix emacs' cruddy broken syntax highlighting + +def find_external_links(url, page): + """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" + + for match in REL.finditer(page): + tag, rel = match.groups() + rels = map(str.strip, rel.lower().split(',')) + if 'homepage' in rels or 'download' in rels: + for match in HREF.finditer(tag): + yield urlparse.urljoin(url, htmldecode(match.group(1))) + + for tag in ("Home Page", "Download URL"): + pos = page.find(tag) + if pos!=-1: + match = HREF.search(page,pos) + if match: + yield urlparse.urljoin(url, htmldecode(match.group(1))) + +user_agent = "Python-urllib/%s setuptools/%s" % ( + urllib2.__version__, require('setuptools')[0].version +) + + +class PackageIndex(Environment): + """A distribution index that scans web pages for download URLs""" + + def __init__(self, index_url="http://pypi.python.org/simple", hosts=('*',), + *args, **kw + ): + Environment.__init__(self,*args,**kw) + self.index_url = index_url + "/"[:not index_url.endswith('/')] + self.scanned_urls = {} + self.fetched_urls = {} + self.package_pages = {} + self.allows = re.compile('|'.join(map(translate,hosts))).match + self.to_scan = [] + + + + def process_url(self, url, retrieve=False): + """Evaluate a URL as a possible download, and maybe retrieve it""" + if url in self.scanned_urls and not retrieve: + return + self.scanned_urls[url] = True + if not URL_SCHEME(url): + self.process_filename(url) + return + else: + dists = list(distros_for_url(url)) + if dists: + if not self.url_ok(url): + return + self.debug("Found link: %s", url) + + if dists or not retrieve or url in self.fetched_urls: + map(self.add, dists) + return # don't need the actual page + + if not self.url_ok(url): + self.fetched_urls[url] = True + return + + self.info("Reading %s", url) + self.fetched_urls[url] = True # prevent multiple fetch attempts + f = self.open_url(url, "Download error: %s -- Some packages may not be found!") + if f is None: return + self.fetched_urls[f.url] = True + if 'html' not in f.headers.get('content-type', '').lower(): + f.close() # not html, we can't process it + return + + base = f.url # handle redirects + page = f.read() + f.close() + if url.startswith(self.index_url) and getattr(f,'code',None)!=404: + page = self.process_index(url, page) + for match in HREF.finditer(page): + link = urlparse.urljoin(base, htmldecode(match.group(1))) + self.process_url(link) + + def process_filename(self, fn, nested=False): + # process filenames or directories + if not os.path.exists(fn): + self.warn("Not found: %s", fn) + return + + if os.path.isdir(fn) and not nested: + path = os.path.realpath(fn) + for item in os.listdir(path): + self.process_filename(os.path.join(path,item), True) + + dists = distros_for_filename(fn) + if dists: + self.debug("Found: %s", fn) + map(self.add, dists) + + def url_ok(self, url, fatal=False): + s = URL_SCHEME(url) + if (s and s.group(1).lower()=='file') or self.allows(urlparse.urlparse(url)[1]): + return True + msg = "\nLink to % s ***BLOCKED*** by --allow-hosts\n" + if fatal: + 
raise DistutilsError(msg % url)
+        else:
+            self.warn(msg, url)
+
+    def scan_egg_links(self, search_path):
+        for item in search_path:
+            if os.path.isdir(item):
+                for entry in os.listdir(item):
+                    if entry.endswith('.egg-link'):
+                        self.scan_egg_link(item, entry)
+
+    def scan_egg_link(self, path, entry):
+        lines = filter(None, map(str.strip, file(os.path.join(path, entry))))
+        if len(lines)==2:
+            for dist in find_distributions(os.path.join(path, lines[0])):
+                dist.location = os.path.join(path, *lines)
+                dist.precedence = SOURCE_DIST
+                self.add(dist)
+
+    def process_index(self,url,page):
+        """Process the contents of a PyPI page"""
+        def scan(link):
+            # Process a URL to see if it's for a package page
+            if link.startswith(self.index_url):
+                parts = map(
+                    urllib2.unquote, link[len(self.index_url):].split('/')
+                )
+                if len(parts)==2 and '#' not in parts[1]:
+                    # it's a package page, sanitize and index it
+                    pkg = safe_name(parts[0])
+                    ver = safe_version(parts[1])
+                    self.package_pages.setdefault(pkg.lower(),{})[link] = True
+                    return to_filename(pkg), to_filename(ver)
+            return None, None
+
+        # process an index page into the package-page index
+        for match in HREF.finditer(page):
+            scan( urlparse.urljoin(url, htmldecode(match.group(1))) )
+
+        pkg, ver = scan(url)   # ensure this page is in the page index
+        if pkg:
+            # process individual package page
+            for new_url in find_external_links(url, page):
+                # Process the found URL
+                base, frag = egg_info_for_url(new_url)
+                if base.endswith('.py') and not frag:
+                    if ver:
+                        new_url+='#egg=%s-%s' % (pkg,ver)
+                    else:
+                        self.need_version_info(url)
+                self.scan_url(new_url)
+
+            return PYPI_MD5.sub(
+                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
+            )
+        else:
+            return ""   # no sense double-scanning non-package pages
+
+
+
+    def need_version_info(self, url):
+        self.scan_all(
+            "Page at %s links to .py file(s) without version info; an index "
+            "scan is required.", url
+        )
+
+    def scan_all(self, msg=None, *args):
+        if self.index_url not in self.fetched_urls:
+            if msg: self.warn(msg,*args)
+            self.info(
+                "Scanning index of all packages (this may take a while)"
+            )
+        self.scan_url(self.index_url)
+
+    def find_packages(self, requirement):
+        self.scan_url(self.index_url + requirement.unsafe_name+'/')
+
+        if not self.package_pages.get(requirement.key):
+            # Fall back to safe version of the name
+            self.scan_url(self.index_url + requirement.project_name+'/')
+
+        if not self.package_pages.get(requirement.key):
+            # We couldn't find the target package, so search the index page too
+            self.not_found_in_index(requirement)
+
+        for url in list(self.package_pages.get(requirement.key,())):
+            # scan each page that might be related to the desired package
+            self.scan_url(url)
+
+    def obtain(self, requirement, installer=None):
+        self.prescan(); self.find_packages(requirement)
+        for dist in self[requirement.key]:
+            if dist in requirement:
+                return dist
+            self.debug("%s does not match %s", requirement, dist)
+        return super(PackageIndex, self).obtain(requirement,installer)
+
+
+
+
+
+    def check_md5(self, cs, info, filename, tfp):
+        if re.match('md5=[0-9a-f]{32}$', info):
+            self.debug("Validating md5 checksum for %s", filename)
+            if cs.hexdigest()!=info[4:]:
+                tfp.close()
+                os.unlink(filename)
+                raise DistutilsError(
+                    "MD5 validation failed for "+os.path.basename(filename)+
+                    "; possible download problem?"
+ ) + + def add_find_links(self, urls): + """Add `urls` to the list that will be prescanned for searches""" + for url in urls: + if ( + self.to_scan is None # if we have already "gone online" + or not URL_SCHEME(url) # or it's a local file/directory + or url.startswith('file:') + or list(distros_for_url(url)) # or a direct package link + ): + # then go ahead and process it now + self.scan_url(url) + else: + # otherwise, defer retrieval till later + self.to_scan.append(url) + + def prescan(self): + """Scan urls scheduled for prescanning (e.g. --find-links)""" + if self.to_scan: + map(self.scan_url, self.to_scan) + self.to_scan = None # from now on, go ahead and process immediately + + def not_found_in_index(self, requirement): + if self[requirement.key]: # we've seen at least one distro + meth, msg = self.info, "Couldn't retrieve index page for %r" + else: # no distros seen for this name, might be misspelled + meth, msg = (self.warn, + "Couldn't find index page for %r (maybe misspelled?)") + meth(msg, requirement.unsafe_name) + self.scan_all() + + def download(self, spec, tmpdir): + """Locate and/or download `spec` to `tmpdir`, returning a local path + + `spec` may be a ``Requirement`` object, or a string containing a URL, + an existing local filename, or a project/version requirement spec + (i.e. the string form of a ``Requirement`` object). If it is the URL + of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one + that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is + automatically created alongside the downloaded file. + + If `spec` is a ``Requirement`` object or a string containing a + project/version requirement spec, this method returns the location of + a matching distribution (possibly after downloading it to `tmpdir`). + If `spec` is a locally existing file or directory name, it is simply + returned unchanged. If `spec` is a URL, it is downloaded to a subpath + of `tmpdir`, and the local filename is returned. Various errors may be + raised if a problem occurs during downloading. + """ + if not isinstance(spec,Requirement): + scheme = URL_SCHEME(spec) + if scheme: + # It's a url, download it to tmpdir + found = self._download_url(scheme.group(1), spec, tmpdir) + base, fragment = egg_info_for_url(spec) + if base.endswith('.py'): + found = self.gen_setup(found,fragment,tmpdir) + return found + elif os.path.exists(spec): + # Existing file or directory, just return it + return spec + else: + try: + spec = Requirement.parse(spec) + except ValueError: + raise DistutilsError( + "Not a URL, existing file, or requirement spec: %r" % + (spec,) + ) + return getattr(self.fetch_distribution(spec, tmpdir),'location',None) + + + def fetch_distribution(self, + requirement, tmpdir, force_scan=False, source=False, develop_ok=False, + local_index=None, + ): + """Obtain a distribution suitable for fulfilling `requirement` + + `requirement` must be a ``pkg_resources.Requirement`` instance. + If necessary, or if the `force_scan` flag is set, the requirement is + searched for in the (online) package index as well as the locally + installed packages. If a distribution matching `requirement` is found, + the returned distribution's ``location`` is the value you would have + gotten from calling the ``download()`` method with the matching + distribution's URL or filename. If no matching distribution is found, + ``None`` is returned. + + If the `source` flag is set, only source distributions and source + checkout links will be considered. 
Unless the `develop_ok` flag is + set, development and system eggs (i.e., those using the ``.egg-info`` + format) will be ignored. + """ + # process a Requirement + self.info("Searching for %s", requirement) + skipped = {} + dist = None + + def find(env, req): + # Find a matching distribution; may be called more than once + + # first try to find a local dist + for allow_remote in (False, True): + # then try to find a platform-dependent dist + for allow_platform_independent in (False, True): + for dist in env[req.key]: + if dist.precedence==DEVELOP_DIST and not develop_ok: + if dist not in skipped: + self.warn("Skipping development or system egg: %s",dist) + skipped[dist] = 1 + continue + + if ((is_local(dist.location) or allow_remote) and + (dist in req) and + ((allow_platform_independent or dist.platform is not None) and + (dist.precedence<=SOURCE_DIST or not source))): + return dist + + if force_scan: + self.prescan() + self.find_packages(requirement) + dist = find(self, requirement) + + if local_index is not None: + dist = dist or find(local_index, requirement) + + if dist is None and self.to_scan is not None: + self.prescan() + dist = find(self, requirement) + + if dist is None and not force_scan: + self.find_packages(requirement) + dist = find(self, requirement) + + if dist is None: + self.warn( + "No local packages or download links found for %s%s", + (source and "a source distribution of " or ""), + requirement, + ) + else: + self.info("Best match: %s", dist) + return dist.clone(location=self.download(dist.location, tmpdir)) + + + def fetch(self, requirement, tmpdir, force_scan=False, source=False): + """Obtain a file suitable for fulfilling `requirement` + + DEPRECATED; use the ``fetch_distribution()`` method now instead. For + backward compatibility, this routine is identical but returns the + ``location`` of the downloaded distribution instead of a distribution + object. + """ + dist = self.fetch_distribution(requirement,tmpdir,force_scan,source) + if dist is not None: + return dist.location + return None + + + def gen_setup(self, filename, fragment, tmpdir): + match = EGG_FRAGMENT.match(fragment) + dists = match and [d for d in + interpret_distro_name(filename, match.group(1), None) if d.version + ] or [] + + if len(dists)==1: # unambiguous ``#egg`` fragment + basename = os.path.basename(filename) + + # Make sure the file has been downloaded to the temp dir. + if os.path.dirname(filename) != tmpdir: + dst = os.path.join(tmpdir, basename) + from setuptools.command.easy_install import samefile + if not samefile(filename, dst): + shutil.copy2(filename, dst) + filename=dst + + file = open(os.path.join(tmpdir, 'setup.py'), 'w') + file.write( + "from setuptools import setup\n" + "setup(name=%r, version=%r, py_modules=[%r])\n" + % ( + dists[0].project_name, dists[0].version, + os.path.splitext(basename)[0] + ) + ) + file.close() + return filename + + elif match: + raise DistutilsError( + "Can't unambiguously interpret project/version identifier %r; " + "any dashes in the name or version should be escaped using " + "underscores. %r" % (fragment,dists) + ) + else: + raise DistutilsError( + "Can't process plain .py files without an '#egg=name-version'" + " suffix to enable automatic setup script generation." 
+ ) + + dl_blocksize = 8192 + def _download_to(self, url, filename): + self.info("Downloading %s", url) + # Download the file + fp, tfp, info = None, None, None + try: + if '#' in url: + url, info = url.split('#', 1) + fp = self.open_url(url) + if isinstance(fp, urllib2.HTTPError): + raise DistutilsError( + "Can't download %s: %s %s" % (url, fp.code,fp.msg) + ) + cs = md5() + headers = fp.info() + blocknum = 0 + bs = self.dl_blocksize + size = -1 + if "content-length" in headers: + size = int(headers["Content-Length"]) + self.reporthook(url, filename, blocknum, bs, size) + tfp = open(filename,'wb') + while True: + block = fp.read(bs) + if block: + cs.update(block) + tfp.write(block) + blocknum += 1 + self.reporthook(url, filename, blocknum, bs, size) + else: + break + if info: self.check_md5(cs, info, filename, tfp) + return headers + finally: + if fp: fp.close() + if tfp: tfp.close() + + def reporthook(self, url, filename, blocknum, blksize, size): + pass # no-op + + + def open_url(self, url, warning=None): + if url.startswith('file:'): return local_open(url) + try: + return open_with_auth(url) + except urllib2.HTTPError, v: + return v + except urllib2.URLError, v: + reason = v.reason + except httplib.HTTPException, v: + reason = "%s: %s" % (v.__doc__ or v.__class__.__name__, v) + if warning: + self.warn(warning, reason) + else: + raise DistutilsError("Download error for %s: %s" % (url, reason)) + + def _download_url(self, scheme, url, tmpdir): + # Determine download filename + # + name, fragment = egg_info_for_url(url) + if name: + while '..' in name: + name = name.replace('..','.').replace('\\','_') + else: + name = "__downloaded__" # default if URL has no path contents + + if name.endswith('.egg.zip'): + name = name[:-4] # strip the extra .zip before download + + filename = os.path.join(tmpdir,name) + + # Download the file + # + if scheme=='svn' or scheme.startswith('svn+'): + return self._download_svn(url, filename) + elif scheme=='file': + return urllib2.url2pathname(urlparse.urlparse(url)[2]) + else: + self.url_ok(url, True) # raises error if not allowed + return self._attempt_download(url, filename) + + + def scan_url(self, url): + self.process_url(url, True) + + + def _attempt_download(self, url, filename): + headers = self._download_to(url, filename) + if 'html' in headers.get('content-type','').lower(): + return self._download_html(url, headers, filename) + else: + return filename + + def _download_html(self, url, headers, filename): + file = open(filename) + for line in file: + if line.strip(): + # Check for a subversion index page + if re.search(r'([^- ]+ - )?Revision \d+:', line): + # it's a subversion index page: + file.close() + os.unlink(filename) + return self._download_svn(url, filename) + break # not an index page + file.close() + os.unlink(filename) + raise DistutilsError("Unexpected HTML page found at "+url) + + def _download_svn(self, url, filename): + url = url.split('#',1)[0] # remove any fragment for svn's sake + self.info("Doing subversion checkout from %s to %s", url, filename) + os.system("svn checkout -q %s %s" % (url, filename)) + return filename + + def debug(self, msg, *args): + log.debug(msg, *args) + + def info(self, msg, *args): + log.info(msg, *args) + + def warn(self, msg, *args): + log.warn(msg, *args) + +# This pattern matches a character entity reference (a decimal numeric +# references, a hexadecimal numeric reference, or a named reference). 
+entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+def uchr(c):
+    if not isinstance(c, int):
+        return c
+    if c>255: return unichr(c)
+    return chr(c)
+
+def decode_entity(match):
+    what = match.group(1)
+    if what.startswith('#x'):
+        what = int(what[2:], 16)
+    elif what.startswith('#'):
+        what = int(what[1:])
+    else:
+        from htmlentitydefs import name2codepoint
+        what = name2codepoint.get(what, match.group(0))
+    return uchr(what)
+
+def htmldecode(text):
+    """Decode HTML entities in the given text."""
+    return entity_sub(decode_entity, text)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def open_with_auth(url):
+    """Open a urllib2 request, handling HTTP authentication"""
+
+    scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
+
+    if scheme in ('http', 'https'):
+        auth, host = urllib.splituser(netloc)
+    else:
+        auth = None
+
+    if auth:
+        auth = "Basic " + urllib2.unquote(auth).encode('base64').strip()
+        new_url = urlparse.urlunparse((scheme,host,path,params,query,frag))
+        request = urllib2.Request(new_url)
+        request.add_header("Authorization", auth)
+    else:
+        request = urllib2.Request(url)
+
+    request.add_header('User-Agent', user_agent)
+    fp = urllib2.urlopen(request)
+
+    if auth:
+        # Put authentication info back into request URL if same host,
+        # so that links found on the page will work
+        s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url)
+        if s2==scheme and h2==host:
+            fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2))
+
+    return fp
+
+
+
+
+
+
+
+
+
+
+
+
+
+def fix_sf_url(url):
+    return url      # backward compatibility
+
+def local_open(url):
+    """Read a local path, with special support for directories"""
+    scheme, server, path, param, query, frag = urlparse.urlparse(url)
+    filename = urllib2.url2pathname(path)
+    if os.path.isfile(filename):
+        return urllib2.urlopen(url)
+    elif path.endswith('/') and os.path.isdir(filename):
+        files = []
+        for f in os.listdir(filename):
+            if f=='index.html':
+                body = open(os.path.join(filename,f),'rb').read()
+                break
+            elif os.path.isdir(os.path.join(filename,f)):
+                f+='/'
+            files.append("<a href=%r>%s</a>" % (f,f))
+        else:
+            body = ("<html><head><title>%s</title>" % url) + \
+                "</head><body>%s</body></html>" % '\n'.join(files)
+        status, message = 200, "OK"
+    else:
+        status, message, body = 404, "Path not found", "Not found"
+
+    return urllib2.HTTPError(url, status, message,
+        {'content-type':'text/html'}, cStringIO.StringIO(body))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# this line is a kludge to keep the trailing blank lines for pje's editor
diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/sandbox.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/sandbox.py
--- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/sandbox.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/sandbox.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,287 @@
+import os, sys, __builtin__, tempfile, operator, pkg_resources
+_os = sys.modules[os.name]
+_open = open
+_file = file
+
+from distutils.errors import DistutilsError
+from pkg_resources import working_set
+
+__all__ = [
+    "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def run_setup(setup_script, args):
+    """Run a distutils setup script, sandboxed in its directory"""
+    old_dir = os.getcwd()
+    save_argv = sys.argv[:]
+    save_path = sys.path[:]
+    setup_dir = os.path.abspath(os.path.dirname(setup_script))
+    temp_dir = os.path.join(setup_dir,'temp')
+    if not 
os.path.isdir(temp_dir): os.makedirs(temp_dir) + save_tmp = tempfile.tempdir + save_modules = sys.modules.copy() + pr_state = pkg_resources.__getstate__() + try: + tempfile.tempdir = temp_dir; os.chdir(setup_dir) + try: + sys.argv[:] = [setup_script]+list(args) + sys.path.insert(0, setup_dir) + # reset to include setup dir, w/clean callback list + working_set.__init__() + working_set.callbacks.append(lambda dist:dist.activate()) + DirectorySandbox(setup_dir).run( + lambda: execfile( + "setup.py", + {'__file__':setup_script, '__name__':'__main__'} + ) + ) + except SystemExit, v: + if v.args and v.args[0]: + raise + # Normal exit, just return + finally: + pkg_resources.__setstate__(pr_state) + sys.modules.update(save_modules) + for key in list(sys.modules): + if key not in save_modules: del sys.modules[key] + os.chdir(old_dir) + sys.path[:] = save_path + sys.argv[:] = save_argv + tempfile.tempdir = save_tmp + + + +class AbstractSandbox: + """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" + + _active = False + + def __init__(self): + self._attrs = [ + name for name in dir(_os) + if not name.startswith('_') and hasattr(self,name) + ] + + def _copy(self, source): + for name in self._attrs: + setattr(os, name, getattr(source,name)) + + def run(self, func): + """Run 'func' under os sandboxing""" + try: + self._copy(self) + __builtin__.file = self._file + __builtin__.open = self._open + self._active = True + return func() + finally: + self._active = False + __builtin__.open = _open + __builtin__.file = _file + self._copy(_os) + + def _mk_dual_path_wrapper(name): + original = getattr(_os,name) + def wrap(self,src,dst,*args,**kw): + if self._active: + src,dst = self._remap_pair(name,src,dst,*args,**kw) + return original(src,dst,*args,**kw) + return wrap + + for name in ["rename", "link", "symlink"]: + if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name) + + + def _mk_single_path_wrapper(name, original=None): + original = original or getattr(_os,name) + def wrap(self,path,*args,**kw): + if self._active: + path = self._remap_input(name,path,*args,**kw) + return original(path,*args,**kw) + return wrap + + _open = _mk_single_path_wrapper('open', _open) + _file = _mk_single_path_wrapper('file', _file) + for name in [ + "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir", + "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", + "startfile", "mkfifo", "mknod", "pathconf", "access" + ]: + if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name) + + def _mk_single_with_return(name): + original = getattr(_os,name) + def wrap(self,path,*args,**kw): + if self._active: + path = self._remap_input(name,path,*args,**kw) + return self._remap_output(name, original(path,*args,**kw)) + return original(path,*args,**kw) + return wrap + + for name in ['readlink', 'tempnam']: + if hasattr(_os,name): locals()[name] = _mk_single_with_return(name) + + def _mk_query(name): + original = getattr(_os,name) + def wrap(self,*args,**kw): + retval = original(*args,**kw) + if self._active: + return self._remap_output(name, retval) + return retval + return wrap + + for name in ['getcwd', 'tmpnam']: + if hasattr(_os,name): locals()[name] = _mk_query(name) + + def _validate_path(self,path): + """Called to remap or validate any path, whether input or output""" + return path + + def _remap_input(self,operation,path,*args,**kw): + """Called for path inputs""" + return self._validate_path(path) + + def _remap_output(self,operation,path): + """Called for path 
outputs""" + return self._validate_path(path) + + def _remap_pair(self,operation,src,dst,*args,**kw): + """Called for path pairs like rename, link, and symlink operations""" + return ( + self._remap_input(operation+'-from',src,*args,**kw), + self._remap_input(operation+'-to',dst,*args,**kw) + ) + + +class DirectorySandbox(AbstractSandbox): + """Restrict operations to a single subdirectory - pseudo-chroot""" + + write_ops = dict.fromkeys([ + "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", + "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam", + ]) + + def __init__(self,sandbox): + self._sandbox = os.path.normcase(os.path.realpath(sandbox)) + self._prefix = os.path.join(self._sandbox,'') + AbstractSandbox.__init__(self) + + def _violation(self, operation, *args, **kw): + raise SandboxViolation(operation, args, kw) + + def _open(self, path, mode='r', *args, **kw): + if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): + self._violation("open", path, mode, *args, **kw) + return _open(path,mode,*args,**kw) + + def tmpnam(self): self._violation("tmpnam") + + def _ok(self,path): + if hasattr(_os,'devnull') and path==_os.devnull: return True + active = self._active + try: + self._active = False + realpath = os.path.normcase(os.path.realpath(path)) + if realpath==self._sandbox or realpath.startswith(self._prefix): + return True + finally: + self._active = active + + def _remap_input(self,operation,path,*args,**kw): + """Called for path inputs""" + if operation in self.write_ops and not self._ok(path): + self._violation(operation, os.path.realpath(path), *args, **kw) + return path + + def _remap_pair(self,operation,src,dst,*args,**kw): + """Called for path pairs like rename, link, and symlink operations""" + if not self._ok(src) or not self._ok(dst): + self._violation(operation, src, dst, *args, **kw) + return (src,dst) + + def _file(self, path, mode='r', *args, **kw): + if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): + self._violation("file", path, mode, *args, **kw) + return _file(path,mode,*args,**kw) + + def open(self, file, flags, mode=0777): + """Called for low-level os.open()""" + if flags & WRITE_FLAGS and not self._ok(file): + self._violation("os.open", file, flags, mode) + return _os.open(file,flags,mode) + +WRITE_FLAGS = reduce( + operator.or_, [getattr(_os, a, 0) for a in + "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()] +) + +class SandboxViolation(DistutilsError): + """A setup script attempted to modify the filesystem outside the sandbox""" + + def __str__(self): + return """SandboxViolation: %s%r %s + +The package setup script has attempted to modify files on your system +that are not within the EasyInstall build area, and has been aborted. + +This package cannot be safely installed by EasyInstall, and may not +support alternate installation locations even if you run its setup +script by hand. 
Please inform the package's author and the EasyInstall +maintainers to find out if a fix or workaround is available.""" % self.args + + + + + + + + + + + + + + + + + + + + + + + + + + + +# diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/site-patch.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/site-patch.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/site-patch.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/site-patch.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,74 @@ +def __boot(): + import sys, imp, os, os.path + PYTHONPATH = os.environ.get('PYTHONPATH') + if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH): + PYTHONPATH = [] + else: + PYTHONPATH = PYTHONPATH.split(os.pathsep) + + pic = getattr(sys,'path_importer_cache',{}) + stdpath = sys.path[len(PYTHONPATH):] + mydir = os.path.dirname(__file__) + #print "searching",stdpath,sys.path + + for item in stdpath: + if item==mydir or not item: + continue # skip if current dir. on Windows, or my own directory + importer = pic.get(item) + if importer is not None: + loader = importer.find_module('site') + if loader is not None: + # This should actually reload the current module + loader.load_module('site') + break + else: + try: + stream, path, descr = imp.find_module('site',[item]) + except ImportError: + continue + if stream is None: + continue + try: + # This should actually reload the current module + imp.load_module('site',stream,path,descr) + finally: + stream.close() + break + else: + raise ImportError("Couldn't find the real 'site' module") + + #print "loaded", __file__ + + known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp + + oldpos = getattr(sys,'__egginsert',0) # save old insertion position + sys.__egginsert = 0 # and reset the current one + + for item in PYTHONPATH: + addsitedir(item) + + sys.__egginsert += oldpos # restore effective old position + + d,nd = makepath(stdpath[0]) + insert_at = None + new_path = [] + + for item in sys.path: + p,np = makepath(item) + + if np==nd and insert_at is None: + # We've hit the first 'system' path entry, so added entries go here + insert_at = len(new_path) + + if np in known_paths or insert_at is None: + new_path.append(item) + else: + # new path after the insert point, back-insert it + new_path.insert(insert_at, item) + insert_at += 1 + + sys.path[:] = new_path + +if __name__=='site': + __boot() + del __boot diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/__init__.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/__init__.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/__init__.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,369 @@ +"""Tests for the 'setuptools' package""" +from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader +import distutils.core, distutils.cmd +from distutils.errors import DistutilsOptionError, DistutilsPlatformError +from distutils.errors import DistutilsSetupError +import setuptools, setuptools.dist +from setuptools import Feature +from distutils.core import Extension +extract_constant, get_module_constant = None, None +from setuptools.depends import * +from distutils.version import StrictVersion, LooseVersion +from distutils.util import convert_path +import sys, os.path + +def additional_tests(): + import doctest, unittest + suite = 
unittest.TestSuite((
+        doctest.DocFileSuite('api_tests.txt',
+            optionflags=doctest.ELLIPSIS, package=__name__,
+            ),
+        ))
+    if sys.platform == 'win32':
+        suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt'))
+    return suite
+
+def makeSetup(**args):
+    """Return distribution from 'setup(**args)', without executing commands"""
+
+    distutils.core._setup_stop_after = "commandline"
+
+    # Don't let system command line leak into tests!
+    args.setdefault('script_args',['install'])
+
+    try:
+        return setuptools.setup(**args)
+    finally:
+        distutils.core._setup_stop_after = None
+
+
+
+
+class DependsTests(TestCase):
+
+    def testExtractConst(self):
+        if not extract_constant: return # skip on non-bytecode platforms
+
+        def f1():
+            global x,y,z
+            x = "test"
+            y = z
+
+        # unrecognized name
+        self.assertEqual(extract_constant(f1.func_code,'q', -1), None)
+
+        # constant assigned
+        self.assertEqual(extract_constant(f1.func_code,'x', -1), "test")
+
+        # expression assigned
+        self.assertEqual(extract_constant(f1.func_code,'y', -1), -1)
+
+        # recognized name, not assigned
+        self.assertEqual(extract_constant(f1.func_code,'z', -1), None)
+
+
+    def testFindModule(self):
+        self.assertRaises(ImportError, find_module, 'no-such.-thing')
+        self.assertRaises(ImportError, find_module, 'setuptools.non-existent')
+        f,p,i = find_module('setuptools.tests'); f.close()
+
+    def testModuleExtract(self):
+        if not get_module_constant: return # skip on non-bytecode platforms
+        from distutils import __version__
+        self.assertEqual(
+            get_module_constant('distutils','__version__'), __version__
+        )
+        self.assertEqual(
+            get_module_constant('sys','version'), sys.version
+        )
+        self.assertEqual(
+            get_module_constant('setuptools.tests','__doc__'),__doc__
+        )
+
+    def testRequire(self):
+        if not extract_constant: return # skip on non-bytecode platforms
+
+        req = Require('Distutils','1.0.3','distutils')
+
+        self.assertEqual(req.name, 'Distutils')
+        self.assertEqual(req.module, 'distutils')
+        self.assertEqual(req.requested_version, '1.0.3')
+        self.assertEqual(req.attribute, '__version__')
+        self.assertEqual(req.full_name(), 'Distutils-1.0.3')
+
+        from distutils import __version__
+        self.assertEqual(req.get_version(), __version__)
+        self.failUnless(req.version_ok('1.0.9'))
+        self.failIf(req.version_ok('0.9.1'))
+        self.failIf(req.version_ok('unknown'))
+
+        self.failUnless(req.is_present())
+        self.failUnless(req.is_current())
+
+        req = Require('Distutils 3000','03000','distutils',format=LooseVersion)
+        self.failUnless(req.is_present())
+        self.failIf(req.is_current())
+        self.failIf(req.version_ok('unknown'))
+
+        req = Require('Do-what-I-mean','1.0','d-w-i-m')
+        self.failIf(req.is_present())
+        self.failIf(req.is_current())
+
+        req = Require('Tests', None, 'tests', homepage="http://example.com")
+        self.assertEqual(req.format, None)
+        self.assertEqual(req.attribute, None)
+        self.assertEqual(req.requested_version, None)
+        self.assertEqual(req.full_name(), 'Tests')
+        self.assertEqual(req.homepage, 'http://example.com')
+
+        paths = [os.path.dirname(p) for p in __path__]
+        self.failUnless(req.is_present(paths))
+        self.failUnless(req.is_current(paths))
+
+
+class DistroTests(TestCase):
+
+    def setUp(self):
+        self.e1 = Extension('bar.ext',['bar.c'])
+        self.e2 = Extension('c.y', ['y.c'])
+
+        self.dist = makeSetup(
+            packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
+            py_modules=['b.d','x'],
+            ext_modules = (self.e1, self.e2),
+            package_dir = {},
+        )
+
+
+    def testDistroType(self):
+        self.failUnless(isinstance(self.dist,setuptools.dist.Distribution))
+
+
+    def 
testExcludePackage(self): + self.dist.exclude_package('a') + self.assertEqual(self.dist.packages, ['b','c']) + + self.dist.exclude_package('b') + self.assertEqual(self.dist.packages, ['c']) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1, self.e2]) + + self.dist.exclude_package('c') + self.assertEqual(self.dist.packages, []) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1]) + + # test removals from unspecified options + makeSetup().exclude_package('x') + + + + + + + + def testIncludeExclude(self): + # remove an extension + self.dist.exclude(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2]) + + # add it back in + self.dist.include(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) + + # should not add duplicate + self.dist.include(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) + + def testExcludePackages(self): + self.dist.exclude(packages=['c','b','a']) + self.assertEqual(self.dist.packages, []) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1]) + + def testEmpty(self): + dist = makeSetup() + dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) + dist = makeSetup() + dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) + + def testContents(self): + self.failUnless(self.dist.has_contents_for('a')) + self.dist.exclude_package('a') + self.failIf(self.dist.has_contents_for('a')) + + self.failUnless(self.dist.has_contents_for('b')) + self.dist.exclude_package('b') + self.failIf(self.dist.has_contents_for('b')) + + self.failUnless(self.dist.has_contents_for('c')) + self.dist.exclude_package('c') + self.failIf(self.dist.has_contents_for('c')) + + + + + def testInvalidIncludeExclude(self): + self.assertRaises(DistutilsSetupError, + self.dist.include, nonexistent_option='x' + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, nonexistent_option='x' + ) + self.assertRaises(DistutilsSetupError, + self.dist.include, packages={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, packages={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.include, ext_modules={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, ext_modules={'x':'y'} + ) + + self.assertRaises(DistutilsSetupError, + self.dist.include, package_dir=['q'] + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, package_dir=['q'] + ) + + + + + + + + + + + + + + + +class FeatureTests(TestCase): + + def setUp(self): + self.req = Require('Distutils','1.0.3','distutils') + self.dist = makeSetup( + features={ + 'foo': Feature("foo",standard=True,require_features=['baz',self.req]), + 'bar': Feature("bar", standard=True, packages=['pkg.bar'], + py_modules=['bar_et'], remove=['bar.ext'], + ), + 'baz': Feature( + "baz", optional=False, packages=['pkg.baz'], + scripts = ['scripts/baz_it'], + libraries=[('libfoo','foo/foofoo.c')] + ), + 'dwim': Feature("DWIM", available=False, remove='bazish'), + }, + script_args=['--without-bar', 'install'], + packages = ['pkg.bar', 'pkg.foo'], + py_modules = ['bar_et', 'bazish'], + ext_modules = [Extension('bar.ext',['bar.c'])] + ) + + def testDefaults(self): + self.failIf( + Feature( + "test",standard=True,remove='x',available=False + ).include_by_default() + ) + self.failUnless( + Feature("test",standard=True,remove='x').include_by_default() + ) + # 
Feature must have either kwargs, removes, or require_features + self.assertRaises(DistutilsSetupError, Feature, "test") + + def testAvailability(self): + self.assertRaises( + DistutilsPlatformError, + self.dist.features['dwim'].include_in, self.dist + ) + + def testFeatureOptions(self): + dist = self.dist + self.failUnless( + ('with-dwim',None,'include DWIM') in dist.feature_options + ) + self.failUnless( + ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options + ) + self.failUnless( + ('with-bar',None,'include bar (default)') in dist.feature_options + ) + self.failUnless( + ('without-bar',None,'exclude bar') in dist.feature_options + ) + self.assertEqual(dist.feature_negopt['without-foo'],'with-foo') + self.assertEqual(dist.feature_negopt['without-bar'],'with-bar') + self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim') + self.failIf('without-baz' in dist.feature_negopt) + + def testUseFeatures(self): + dist = self.dist + self.assertEqual(dist.with_foo,1) + self.assertEqual(dist.with_bar,0) + self.assertEqual(dist.with_baz,1) + self.failIf('bar_et' in dist.py_modules) + self.failIf('pkg.bar' in dist.packages) + self.failUnless('pkg.baz' in dist.packages) + self.failUnless('scripts/baz_it' in dist.scripts) + self.failUnless(('libfoo','foo/foofoo.c') in dist.libraries) + self.assertEqual(dist.ext_modules,[]) + self.assertEqual(dist.require_features, [self.req]) + + # If we ask for bar, it should fail because we explicitly disabled + # it on the command line + self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar') + + def testFeatureWithInvalidRemove(self): + self.assertRaises( + SystemExit, makeSetup, features = {'x':Feature('x', remove='y')} + ) + +class TestCommandTests(TestCase): + + def testTestIsCommand(self): + test_cmd = makeSetup().get_command_obj('test') + self.failUnless(isinstance(test_cmd, distutils.cmd.Command)) + + def testLongOptSuiteWNoDefault(self): + ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite']) + ts1 = ts1.get_command_obj('test') + ts1.ensure_finalized() + self.assertEqual(ts1.test_suite, 'foo.tests.suite') + + def testDefaultSuite(self): + ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test') + ts2.ensure_finalized() + self.assertEqual(ts2.test_suite, 'bar.tests.suite') + + def testDefaultWModuleOnCmdLine(self): + ts3 = makeSetup( + test_suite='bar.tests', + script_args=['test','-m','foo.tests'] + ).get_command_obj('test') + ts3.ensure_finalized() + self.assertEqual(ts3.test_module, 'foo.tests') + self.assertEqual(ts3.test_suite, 'foo.tests.test_suite') + + def testConflictingOptions(self): + ts4 = makeSetup( + script_args=['test','-m','bar.tests', '-s','foo.tests.suite'] + ).get_command_obj('test') + self.assertRaises(DistutilsOptionError, ts4.ensure_finalized) + + def testNoSuite(self): + ts5 = makeSetup().get_command_obj('test') + ts5.ensure_finalized() + self.assertEqual(ts5.test_suite, None) + + + + + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/test_packageindex.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/test_packageindex.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/test_packageindex.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/test_packageindex.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,27 @@ +"""Package Index Tests +""" +# More would be better! 
+ +import os, shutil, tempfile, unittest, urllib2 +import pkg_resources +import setuptools.package_index + +class TestPackageIndex(unittest.TestCase): + + def test_bad_urls(self): + index = setuptools.package_index.PackageIndex() + url = 'http://127.0.0.1/nonesuch/test_package_index' + try: + v = index.open_url(url) + except Exception, v: + self.assert_(url in str(v)) + else: + self.assert_(isinstance(v,urllib2.HTTPError)) + + def test_url_ok(self): + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + url = 'file:///tmp/test_package_index' + self.assert_(index.url_ok(url, True)) + diff -Nru tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/test_resources.py tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/test_resources.py --- tahoe-lafs-1.9.2/setuptools-0.6c16dev4.egg/setuptools/tests/test_resources.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/setuptools-0.6c16dev4.egg/setuptools/tests/test_resources.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,533 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove +from unittest import TestCase, makeSuite; from pkg_resources import * +from setuptools.command.easy_install import get_script_header, is_sh +import os, pkg_resources, sys, StringIO +try: frozenset +except NameError: + from sets import ImmutableSet as frozenset + +class Metadata(EmptyProvider): + """Mock object to return metadata as if from an on-disk distribution""" + + def __init__(self,*pairs): + self.metadata = dict(pairs) + + def has_metadata(self,name): + return name in self.metadata + + def get_metadata(self,name): + return self.metadata[name] + + def get_metadata_lines(self,name): + return yield_lines(self.get_metadata(name)) + +class DistroTests(TestCase): + + def testCollection(self): + # empty path should produce no distributions + ad = Environment([], platform=None, python=None) + self.assertEqual(list(ad), []) + self.assertEqual(ad['FooPkg'],[]) + ad.add(Distribution.from_filename("FooPkg-1.3_1.egg")) + ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")) + ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg")) + + # Name is in there now + self.failUnless(ad['FooPkg']) + # But only 1 package + self.assertEqual(list(ad), ['foopkg']) + + # Distributions sort by version + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2'] + ) + # Removing a distribution leaves sequence alone + ad.remove(ad['FooPkg'][1]) + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.4','1.2'] + ) + # And inserting adds them in order + ad.add(Distribution.from_filename("FooPkg-1.9.egg")) + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2'] + ) + + ws = WorkingSet([]) + foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg") + foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg") + req, = parse_requirements("FooPkg>=1.3") + + # Nominal case: no distros on path, should yield all applicable + self.assertEqual(ad.best_match(req,ws).version, '1.9') + # If a matching distro is already installed, should return only that + ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4') + + # If the first matching distro is unsuitable, it's a version conflict + ws = WorkingSet([]); ws.add(foo12); ws.add(foo14) + self.assertRaises(VersionConflict, ad.best_match, req, ws) + + # If more than one match on the path, the first one takes precedence + ws = WorkingSet([]); 
ws.add(foo14); ws.add(foo12); ws.add(foo14); + self.assertEqual(ad.best_match(req,ws).version, '1.4') + + def checkFooPkg(self,d): + self.assertEqual(d.project_name, "FooPkg") + self.assertEqual(d.key, "foopkg") + self.assertEqual(d.version, "1.3-1") + self.assertEqual(d.py_version, "2.4") + self.assertEqual(d.platform, "win32") + self.assertEqual(d.parsed_version, parse_version("1.3-1")) + + def testDistroBasics(self): + d = Distribution( + "/some/path", + project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" + ) + self.checkFooPkg(d) + + d = Distribution("/some/path") + self.assertEqual(d.py_version, sys.version[:3]) + self.assertEqual(d.platform, None) + + def testDistroParse(self): + d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg") + self.checkFooPkg(d) + d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info") + self.checkFooPkg(d) + + def testDistroMetadata(self): + d = Distribution( + "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", + metadata = Metadata( + ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") + ) + ) + self.checkFooPkg(d) + + + def distRequires(self, txt): + return Distribution("/foo", metadata=Metadata(('depends.txt', txt))) + + def checkRequires(self, dist, txt, extras=()): + self.assertEqual( + list(dist.requires(extras)), + list(parse_requirements(txt)) + ) + + def testDistroDependsSimple(self): + for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0": + self.checkRequires(self.distRequires(v), v) + + + def testResolve(self): + ad = Environment([]); ws = WorkingSet([]) + # Resolving no requirements -> nothing to install + self.assertEqual( list(ws.resolve([],ad)), [] ) + # Request something not in the collection -> DistributionNotFound + self.assertRaises( + DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad + ) + Foo = Distribution.from_filename( + "/foo_dir/Foo-1.2.egg", + metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0")) + ) + ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg")) + + # Request thing(s) that are available -> list to activate + for i in range(3): + targets = list(ws.resolve(parse_requirements("Foo"), ad)) + self.assertEqual(targets, [Foo]) + map(ws.add,targets) + self.assertRaises(VersionConflict, ws.resolve, + parse_requirements("Foo==0.9"), ad) + ws = WorkingSet([]) # reset + + # Request an extra that causes an unresolved dependency for "Baz" + self.assertRaises( + DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad + ) + Baz = Distribution.from_filename( + "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo")) + ) + ad.add(Baz) + + # Activation list now includes resolved dependency + self.assertEqual( + list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz] + ) + # Requests for conflicting versions produce VersionConflict + self.assertRaises( VersionConflict, + ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad + ) + + def testDistroDependsOptions(self): + d = self.distRequires(""" + Twisted>=1.5 + [docgen] + ZConfig>=2.0 + docutils>=0.3 + [fastcgi] + fcgiapp>=0.1""") + self.checkRequires(d,"Twisted>=1.5") + self.checkRequires( + d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] + ) + self.checkRequires( + d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] + ) + self.checkRequires( + d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), + ["docgen","fastcgi"] + ) + self.checkRequires( + d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), + ["fastcgi", "docgen"] + ) + 
self.assertRaises(UnknownExtra, d.requires, ["foo"]) + + + + + + + + + + + + + + + + + +class EntryPointTests(TestCase): + + def assertfields(self, ep): + self.assertEqual(ep.name,"foo") + self.assertEqual(ep.module_name,"setuptools.tests.test_resources") + self.assertEqual(ep.attrs, ("EntryPointTests",)) + self.assertEqual(ep.extras, ("x",)) + self.failUnless(ep.load() is EntryPointTests) + self.assertEqual( + str(ep), + "foo = setuptools.tests.test_resources:EntryPointTests [x]" + ) + + def setUp(self): + self.dist = Distribution.from_filename( + "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) + + def testBasics(self): + ep = EntryPoint( + "foo", "setuptools.tests.test_resources", ["EntryPointTests"], + ["x"], self.dist + ) + self.assertfields(ep) + + def testParse(self): + s = "foo = setuptools.tests.test_resources:EntryPointTests [x]" + ep = EntryPoint.parse(s, self.dist) + self.assertfields(ep) + + ep = EntryPoint.parse("bar baz= spammity[PING]") + self.assertEqual(ep.name,"bar baz") + self.assertEqual(ep.module_name,"spammity") + self.assertEqual(ep.attrs, ()) + self.assertEqual(ep.extras, ("ping",)) + + ep = EntryPoint.parse(" fizzly = wocka:foo") + self.assertEqual(ep.name,"fizzly") + self.assertEqual(ep.module_name,"wocka") + self.assertEqual(ep.attrs, ("foo",)) + self.assertEqual(ep.extras, ()) + + def testRejects(self): + for ep in [ + "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2", + ]: + try: EntryPoint.parse(ep) + except ValueError: pass + else: raise AssertionError("Should've been bad", ep) + + def checkSubMap(self, m): + self.assertEqual(len(m), len(self.submap_expect)) + for key, ep in self.submap_expect.iteritems(): + self.assertEqual(repr(m.get(key)), repr(ep)) + + submap_expect = dict( + feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), + feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), + feature3=EntryPoint('feature3', 'this.module', extras=['something']) + ) + submap_str = """ + # define features for blah blah + feature1 = somemodule:somefunction + feature2 = another.module:SomeClass [extra1,extra2] + feature3 = this.module [something] + """ + + def testParseList(self): + self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str)) + self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar") + self.assertRaises(ValueError, EntryPoint.parse_group, "x", + ["foo=baz", "foo=bar"]) + + def testParseMap(self): + m = EntryPoint.parse_map({'xyz':self.submap_str}) + self.checkSubMap(m['xyz']) + self.assertEqual(m.keys(),['xyz']) + m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) + self.checkSubMap(m['xyz']) + self.assertEqual(m.keys(),['xyz']) + self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"]) + self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str) + +class RequirementsTests(TestCase): + + def testBasics(self): + r = Requirement.parse("Twisted>=1.2") + self.assertEqual(str(r),"Twisted>=1.2") + self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')") + self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ())) + self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ())) + self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ())) + self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ())) + self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ())) + self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2")) + + def testOrdering(self): + r1 = Requirement("Twisted", 
[('==','1.2c1'),('>=','1.2')], ()) + r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ()) + self.assertEqual(r1,r2) + self.assertEqual(str(r1),str(r2)) + self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2") + + def testBasicContains(self): + r = Requirement("Twisted", [('>=','1.2')], ()) + foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg") + twist11 = Distribution.from_filename("Twisted-1.1.egg") + twist12 = Distribution.from_filename("Twisted-1.2.egg") + self.failUnless(parse_version('1.2') in r) + self.failUnless(parse_version('1.1') not in r) + self.failUnless('1.2' in r) + self.failUnless('1.1' not in r) + self.failUnless(foo_dist not in r) + self.failUnless(twist11 not in r) + self.failUnless(twist12 in r) + + def testAdvancedContains(self): + r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5") + for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'): + self.failUnless(v in r, (v,r)) + for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'): + self.failUnless(v not in r, (v,r)) + + + def testOptionsAndHashing(self): + r1 = Requirement.parse("Twisted[foo,bar]>=1.2") + r2 = Requirement.parse("Twisted[bar,FOO]>=1.2") + r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0") + self.assertEqual(r1,r2) + self.assertEqual(r1,r3) + self.assertEqual(r1.extras, ("foo","bar")) + self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized + self.assertEqual(hash(r1), hash(r2)) + self.assertEqual( + hash(r1), hash(("twisted", ((">=",parse_version("1.2")),), + frozenset(["foo","bar"]))) + ) + + def testVersionEquality(self): + r1 = Requirement.parse("setuptools==0.3a2") + r2 = Requirement.parse("setuptools!=0.3a4") + d = Distribution.from_filename + + self.failIf(d("setuptools-0.3a4.egg") in r1) + self.failIf(d("setuptools-0.3a1.egg") in r1) + self.failIf(d("setuptools-0.3a4.egg") in r2) + + self.failUnless(d("setuptools-0.3a2.egg") in r1) + self.failUnless(d("setuptools-0.3a2.egg") in r2) + self.failUnless(d("setuptools-0.3a3.egg") in r2) + self.failUnless(d("setuptools-0.3a5.egg") in r2) + + + + + + + + + + + + + + +class ParseTests(TestCase): + + def testEmptyParse(self): + self.assertEqual(list(parse_requirements('')), []) + + def testYielding(self): + for inp,out in [ + ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), + (['x\n\n','y'], ['x','y']), + ]: + self.assertEqual(list(pkg_resources.yield_lines(inp)),out) + + def testSplitting(self): + self.assertEqual( + list( + pkg_resources.split_sections(""" + x + [Y] + z + + a + [b ] + # foo + c + [ d] + [q] + v + """ + ) + ), + [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])] + ) + self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo")) + + def testSafeName(self): + self.assertEqual(safe_name("adns-python"), "adns-python") + self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") + self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") + self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker") + self.assertNotEqual(safe_name("peak.web"), "peak-web") + + def testSafeVersion(self): + self.assertEqual(safe_version("1.2-1"), "1.2-1") + self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha") + self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521") + self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker") + self.assertEqual(safe_version("peak.web"), "peak.web") + + def testSimpleRequirements(self): + self.assertEqual( + list(parse_requirements('Twis-Ted>=1.2-1')), + [Requirement('Twis-Ted',[('>=','1.2-1')], ())] + ) + 
self.assertEqual(
+            list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
+            [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
+        )
+        self.assertEqual(
+            Requirement.parse("FooBar==1.99a3"),
+            Requirement("FooBar", [('==','1.99a3')], ())
+        )
+        self.assertRaises(ValueError,Requirement.parse,">=2.3")
+        self.assertRaises(ValueError,Requirement.parse,"x\\")
+        self.assertRaises(ValueError,Requirement.parse,"x==2 q")
+        self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
+        self.assertRaises(ValueError,Requirement.parse,"#")
+
+    def testVersionEquality(self):
+        def c(s1,s2):
+            p1, p2 = parse_version(s1),parse_version(s2)
+            self.assertEqual(p1,p2, (s1,s2,p1,p2))
+
+        c('1.2-rc1', '1.2rc1')
+        c('0.4', '0.4.0')
+        c('0.4.0.0', '0.4.0')
+        c('0.4.0-0', '0.4-0')
+        c('0pl1', '0.0pl1')
+        c('0pre1', '0.0c1')
+        c('0.0.0preview1', '0c1')
+        c('0.0c1', '0-rc1')
+        c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a')
+
+    def testVersionOrdering(self):
+        def c(s1,s2):
+            p1, p2 = parse_version(s1),parse_version(s2)
+            self.failUnless(p1

+ "easy_install will install a package that is already there"
+
+
+ "be more like distutils with regard to --prefix="
+
+
+ "respect the PYTHONPATH"
+ (Note: this patch does not work as intended when site.py has been modified.
+ This will be fixed in a future version.)
+
+
+ "python setup.py --help-commands raises exception due to conflict with distribute"
+
+
+ * The following patch to setuptools introduced bugs, and has been reverted
+ in zetuptoolz:
+
+ $ svn log -r 45514
+ ------------------------------------------------------------------------
+ r45514 | phillip.eby | 2006-04-18 04:03:16 +0100 (Tue, 18 Apr 2006) | 9 lines
+
+ Backport pkgutil, pydoc, and doctest from the 2.5 trunk to setuptools
+ 0.7 trunk. (Sideport?) Setuptools 0.7 will install these in place of
+ the 2.3/2.4 versions (at least of pydoc and doctest) to let them work
+ properly with eggs. pkg_resources now depends on the 2.5 pkgutil, which
+ is included here as _pkgutil, to work around the fact that some system
+ packagers will install setuptools without overriding the stdlib modules.
+ But users who install their own setuptools will get them, and the system
+ packaged people probably don't need them.
+ ------------------------------------------------------------------------
+
+
+ * If unpatched setuptools decides that it needs to change an existing site.py
+ file that appears not to have been written by it (because the file does not
+ start with "def __boot():"), it aborts the installation.
+ zetuptoolz leaves the file alone and outputs a warning, but continues with
+ the installation.
+
+
+ * The scripts written by zetuptoolz have the following extra line:
+
+ # generated by zetuptoolz
+
+ after the header.
+
+
+ * Windows-specific changes (native Python):
+
+ Python distributions may have command-line or GUI scripts.
+ On Windows, setuptools creates an executable wrapper to run each
+ script. zetuptoolz uses a different approach that does not require
+ an .exe wrapper. It writes approximately the same script file that
+ is used on other platforms, but with a .pyscript extension.
+ It also writes a shell-script wrapper (without any extension) that
+ is only used when the command is run from a Cygwin shell.
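To make the wrapper arrangement concrete, here is a minimal sketch of
writing such a script pair. It is illustrative only, not the code
zetuptoolz actually ships: the helper name write_script_pair and the
exact file contents are assumptions.

    import os

    def write_script_pair(bindir, name, body):
        # The real script gets a .pyscript extension; Windows runs it
        # through the file association that "setup.py scriptsetup" creates.
        f = open(os.path.join(bindir, name + '.pyscript'), 'w')
        f.write('#!python\n')
        f.write('# generated by zetuptoolz\n')  # the extra line noted above
        f.write(body)
        f.close()

        # The extensionless companion is a plain sh script, consulted only
        # when the command is invoked from a Cygwin shell; it simply
        # re-executes the .pyscript with the native Python.
        f = open(os.path.join(bindir, name), 'w')
        f.write('#!/bin/sh\n')
        f.write('exec python "`dirname "$0"`/%s.pyscript" "$@"\n' % (name,))
        f.close()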
+ + Some of the advantages of this approach are: + + * Unicode arguments are preserved (although the program will + need to use some Windows-specific code to get at them in + current versions of Python); + * it works correctly on 64-bit Windows; + * the zetuptoolz distribution need not contain either any + binary executables, or any C code that needs to be compiled. + + See setuptools\tests\win_script_wrapper.txt for further details. + + Installing or building any distribution on Windows will automatically + associate .pyscript with the native Python interpreter for the current + user. It will also add .pyscript and .pyw to the PATHEXT variable for + the current user, which is needed to allow scripts to be run without + typing any extension. + + There is an additional setup.py command that can be used to perform + these steps separately (which isn't normally needed, but might be + useful for debugging): + + python setup.py scriptsetup + + Adding the --allusers option, i.e. + + python setup.py scriptsetup --allusers + + will make the .pyscript association and changes to the PATHEXT variable + for all users of this Windows installation, except those that have it + overridden in their per-user environment. In this case setup.py must be + run with Administrator privileges, e.g. from a Command Prompt whose + shortcut has been set to run as Administrator. diff -Nru tahoe-lafs-1.9.2/src/allmydata/__init__.py tahoe-lafs-1.10.0/src/allmydata/__init__.py --- tahoe-lafs-1.9.2/src/allmydata/__init__.py 2012-07-03 16:52:42.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/__init__.py 2013-09-03 15:38:27.000000000 +0000 @@ -190,15 +190,11 @@ trace_info = (etype, str(emsg), ([None] + traceback.extract_tb(etrace))[-1]) packages.append( (pkgname, (None, None, trace_info)) ) else: - if 'sqlite' in pkgname: - packages.append( (pkgname, (get_version(module, 'version'), package_dir(module.__file__), - 'sqlite %s' % (get_version(module, 'sqlite_version'),))) ) - else: - comment = None - if pkgname == 'setuptools' and hasattr(module, '_distribute'): - # distribute does not report its version in any module variables - comment = 'distribute' - packages.append( (pkgname, (get_version(module, '__version__'), package_dir(module.__file__), comment)) ) + comment = None + if pkgname == 'setuptools' and hasattr(module, '_distribute'): + # distribute does not report its version in any module variables + comment = 'distribute' + packages.append( (pkgname, (get_version(module, '__version__'), package_dir(module.__file__), comment)) ) elif pkgname == 'python': packages.append( (pkgname, (platform.python_version(), sys.executable, None)) ) elif pkgname == 'platform': @@ -273,7 +269,7 @@ """This function returns a list of errors due to any failed cross-checks.""" errors = [] - not_pkg_resourceable = set(['sqlite3', 'python', 'platform', __appname__.lower()]) + not_pkg_resourceable = set(['python', 'platform', __appname__.lower()]) not_import_versionable = set(['zope.interface', 'mock', 'pyasn1']) ignorable = set(['argparse', 'pyutil', 'zbase32', 'distribute', 'twisted-web', 'twisted-core', 'twisted-conch']) @@ -295,6 +291,12 @@ continue pr_ver, pr_loc = pkg_resources_vers_and_locs[name] + if imp_ver is None and imp_loc is None: + errors.append("Warning: dependency %r could not be imported. pkg_resources thought it should be possible " + "to import version %r from %r.\nThe exception trace was %r." 
+ % (name, pr_ver, pr_loc, imp_comment)) + continue + try: pr_normver = normalized_version(pr_ver) except Exception, e: @@ -355,24 +357,15 @@ errors = [] - # we require 2.4.4 on non-UCS-2, non-Redhat builds to avoid - # we require 2.4.3 on non-UCS-2 Redhat, because 2.4.3 is common on Redhat-based distros and will have patched the above bug - # we require at least 2.4.2 in any case to avoid a bug in the base64 module: - if sys.maxunicode == 65535: - if sys.version_info < (2, 4, 2) or sys.version_info[0] > 2: - errors.append("Tahoe-LAFS current requires Python v2.4.2 or greater " - "for a UCS-2 build (but less than v3), not %r" % - (sys.version_info,)) - elif platform.platform().lower().find('redhat') >= 0: - if sys.version_info < (2, 4, 3) or sys.version_info[0] > 2: - errors.append("Tahoe-LAFS current requires Python v2.4.3 or greater " - "on Redhat-based distributions (but less than v3), not %r" % - (sys.version_info,)) - else: - if sys.version_info < (2, 4, 4) or sys.version_info[0] > 2: - errors.append("Tahoe-LAFS current requires Python v2.4.4 or greater " - "for a non-UCS-2 build (but less than v3), not %r" % - (sys.version_info,)) + # We require at least 2.6 on all platforms. + # (On Python 3, we'll have failed long before this point.) + if sys.version_info < (2, 6): + try: + version_string = ".".join(map(str, sys.version_info)) + except Exception: + version_string = repr(sys.version_info) + errors.append("Tahoe-LAFS currently requires Python v2.6 or greater (but less than v3), not %s" + % (version_string,)) vers_and_locs = dict(_vers_and_locs_list) for requirement in install_requires: @@ -403,7 +396,7 @@ info = info + " (%s)" % str(loc) res.append(info) - output = ",\n".join(res) + "\n" + output = "\n".join(res) + "\n" if not hasattr(sys, 'frozen'): errors = cross_check_pkg_resources_versus_import() diff -Nru tahoe-lafs-1.9.2/src/allmydata/_auto_deps.py tahoe-lafs-1.10.0/src/allmydata/_auto_deps.py --- tahoe-lafs-1.9.2/src/allmydata/_auto_deps.py 2012-07-03 16:55:44.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/_auto_deps.py 2013-09-03 15:38:27.000000000 +0000 @@ -14,15 +14,22 @@ # Feisty has simplejson 1.4 "simplejson >= 1.4", + # zope.interface >= 3.6.0 is required for Twisted >= 12.1.0. # zope.interface 3.6.3 and 3.6.4 are incompatible with Nevow (#1435). - "zope.interface <= 3.6.2, >= 3.6.5", + "zope.interface == 3.6.0, == 3.6.1, == 3.6.2, >= 3.6.5", - # On Windows we need at least Twisted 9.0 to avoid an indirect dependency on pywin32. - # On Linux we need at least Twisted 10.1.0 for inotify support used by the drop-upload - # frontend. - # We also need Twisted 10.1 for the FTP frontend in order for Twisted's FTP server to - # support asynchronous close. - "Twisted >= 10.1.0", + # * On Windows we need at least Twisted 9.0 to avoid an indirect + # dependency on pywin32. + # * On Linux we need at least Twisted 10.1.0 for inotify support used by + # the drop-upload frontend. + # * We also need Twisted 10.1 for the FTP frontend in order for Twisted's + # FTP server to support asynchronous close. + # * When the cloud backend lands, it will depend on Twisted 10.2.0 which + # includes the fix to https://twistedmatrix.com/trac/ticket/411 + # * The SFTP frontend depends on Twisted 11.0.0 to fix the SSH server + # rekeying bug http://twistedmatrix.com/trac/ticket/4395 + # + "Twisted >= 11.0.0", # * foolscap < 0.5.1 had a performance bug which spent O(N**2) CPU for # transferring large mutable files of size N. 
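As an aside, the effect of a constraint string such as "Twisted >= 11.0.0"
above can be checked directly against pkg_resources, whose Requirement
objects support containment tests on version strings (as the tests earlier
in this diff exercise); the version numbers below are illustrative:

    from pkg_resources import Requirement

    req = Requirement.parse("Twisted >= 11.0.0")
    print "11.0.0" in req   # True: meets the minimum
    print "10.2.0" in req   # False: predates the required fixes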
@@ -51,8 +58,11 @@ "pycrypto == 2.1.0, == 2.3, >= 2.4.1", "pyasn1 >= 0.0.8a", - # http://www.voidspace.org.uk/python/mock/ - "mock", + # http://www.voidspace.org.uk/python/mock/ , 0.8.0 provides "call" + "mock >= 0.8.0", + + # pycryptopp-0.6.0 includes ed25519 + "pycryptopp >= 0.6.0", # Will be needed to test web apps, but not yet. See #1001. #"windmill >= 1.3", @@ -78,30 +88,7 @@ ] def require_more(): - import platform, sys - - if platform.machine().lower() in ['i386', 'x86', 'i686', 'x86_64', 'amd64', '']: - # pycryptopp v0.5.20 fixes bugs in SHA-256 and AES on x86 or amd64 - # (from Crypto++ revisions 470, 471, 480, 492). The '' is there - # in case platform.machine is broken and this is actually an x86 - # or amd64 machine. - install_requires.append("pycryptopp >= 0.5.20") - else: - # pycryptopp v0.5.13 had a new bundled version of Crypto++ - # (v5.6.0) and a new bundled version of setuptools (although that - # shouldn't make any difference to users of pycryptopp). - install_requires.append("pycryptopp >= 0.5.14") - - # Sqlite comes built into Python >= 2.5, and is provided by the "pysqlite" - # distribution for Python 2.4. - try: - import sqlite3 - sqlite3 # hush pyflakes - package_imports.append(('sqlite3', 'sqlite3')) - except ImportError: - # pysqlite v2.0.5 was shipped in Ubuntu 6.06 LTS "dapper" and Nexenta NCP 1. - install_requires.append("pysqlite >= 2.0.5") - package_imports.append(('pysqlite', 'pysqlite2.dbapi2')) + import sys # Don't try to get the version number of setuptools in frozen builds, because # that triggers 'site' processing that causes failures. Note that frozen diff -Nru tahoe-lafs-1.9.2/src/allmydata/_version.py tahoe-lafs-1.10.0/src/allmydata/_version.py --- tahoe-lafs-1.9.2/src/allmydata/_version.py 2012-07-03 18:51:07.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/_version.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,7 +1,8 @@ -# This _version.py is generated from darcs metadata by the tahoe setup.py -# and the "darcsver" package. +# This _version.py is generated from git metadata by the tahoe setup.py. 
__pkgname__ = "allmydata-tahoe" -verstr = "1.9.2" +real_version = "1.10.0" +full_version = "f9af0633d8da426cbcaed3ff05ab6d7128148bb0" +verstr = "1.10.0" __version__ = verstr diff -Nru tahoe-lafs-1.9.2/src/allmydata/check_results.py tahoe-lafs-1.10.0/src/allmydata/check_results.py --- tahoe-lafs-1.9.2/src/allmydata/check_results.py 2012-05-14 02:07:21.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/check_results.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,75 +1,149 @@ from zope.interface import implements from allmydata.interfaces import ICheckResults, ICheckAndRepairResults, \ - IDeepCheckResults, IDeepCheckAndRepairResults, IURI + IDeepCheckResults, IDeepCheckAndRepairResults, IURI, IDisplayableServer from allmydata.util import base32 class CheckResults: implements(ICheckResults) - def __init__(self, uri, storage_index): + def __init__(self, uri, storage_index, + healthy, recoverable, needs_rebalancing, + count_shares_needed, count_shares_expected, + count_shares_good, count_good_share_hosts, + count_recoverable_versions, count_unrecoverable_versions, + servers_responding, sharemap, + count_wrong_shares, list_corrupt_shares, count_corrupt_shares, + list_incompatible_shares, count_incompatible_shares, + summary, report, share_problems, servermap): assert IURI.providedBy(uri), uri - self.uri = uri - self.storage_index = storage_index - self.problems = [] - self.data = {"count-corrupt-shares": 0, - "list-corrupt-shares": [], - } - self.summary = "" - self.report = [] - - def set_healthy(self, healthy): - self.healthy = bool(healthy) - if self.healthy: - assert (not hasattr(self, 'recoverable')) or self.recoverable, hasattr(self, 'recoverable') and self.recoverable - self.recoverable = True - self.summary = "healthy" + self._uri = uri + self._storage_index = storage_index + self._summary = "" + self._healthy = bool(healthy) + if self._healthy: + assert recoverable + if not summary: + summary = "healthy" else: - self.summary = "not healthy" - def set_recoverable(self, recoverable): - self.recoverable = recoverable - if not self.recoverable: - assert (not hasattr(self, 'healthy')) or not self.healthy - self.healthy = False - def set_needs_rebalancing(self, needs_rebalancing): - self.needs_rebalancing_p = bool(needs_rebalancing) - def set_data(self, data): - self.data.update(data) - def set_summary(self, summary): + if not summary: + summary = "not healthy" + self._recoverable = recoverable + if not self._recoverable: + assert not self._healthy + self._needs_rebalancing_p = bool(needs_rebalancing) + + self._count_shares_needed = count_shares_needed + self._count_shares_expected = count_shares_expected + self._count_shares_good = count_shares_good + self._count_good_share_hosts = count_good_share_hosts + self._count_recoverable_versions = count_recoverable_versions + self._count_unrecoverable_versions = count_unrecoverable_versions + for server in servers_responding: + assert IDisplayableServer.providedBy(server), server + self._servers_responding = servers_responding + for shnum, servers in sharemap.items(): + for server in servers: + assert IDisplayableServer.providedBy(server), server + self._sharemap = sharemap + self._count_wrong_shares = count_wrong_shares + for (server, SI, shnum) in list_corrupt_shares: + assert IDisplayableServer.providedBy(server), server + self._list_corrupt_shares = list_corrupt_shares + self._count_corrupt_shares = count_corrupt_shares + for (server, SI, shnum) in list_incompatible_shares: + assert IDisplayableServer.providedBy(server), server + 
self._list_incompatible_shares = list_incompatible_shares + self._count_incompatible_shares = count_incompatible_shares + assert isinstance(summary, str) # should be a single string - self.summary = summary - def set_report(self, report): + self._summary = summary assert not isinstance(report, str) # should be list of strings - self.report = report - - def set_servermap(self, smap): - # mutable only - self.servermap = smap - + self._report = report + if servermap: + from allmydata.mutable.servermap import ServerMap + assert isinstance(servermap, ServerMap), servermap + self._servermap = servermap # mutable only + self._share_problems = share_problems def get_storage_index(self): - return self.storage_index + return self._storage_index def get_storage_index_string(self): - return base32.b2a(self.storage_index) + return base32.b2a(self._storage_index) def get_uri(self): - return self.uri + return self._uri def is_healthy(self): - return self.healthy + return self._healthy def is_recoverable(self): - return self.recoverable + return self._recoverable def needs_rebalancing(self): - return self.needs_rebalancing_p - def get_data(self): - return self.data + return self._needs_rebalancing_p + + def get_encoding_needed(self): + return self._count_shares_needed + def get_encoding_expected(self): + return self._count_shares_expected + + def get_share_counter_good(self): + return self._count_shares_good + def get_share_counter_wrong(self): + return self._count_wrong_shares + + def get_corrupt_shares(self): + return self._list_corrupt_shares + + def get_incompatible_shares(self): + return self._list_incompatible_shares + + def get_servers_responding(self): + return self._servers_responding + + def get_host_counter_good_shares(self): + return self._count_good_share_hosts + + def get_version_counter_recoverable(self): + return self._count_recoverable_versions + def get_version_counter_unrecoverable(self): + return self._count_unrecoverable_versions + + def get_sharemap(self): + return self._sharemap + + def as_dict(self): + sharemap = {} + for shnum, servers in self._sharemap.items(): + sharemap[shnum] = sorted([s.get_serverid() for s in servers]) + responding = [s.get_serverid() for s in self._servers_responding] + corrupt = [(s.get_serverid(), SI, shnum) + for (s, SI, shnum) in self._list_corrupt_shares] + incompatible = [(s.get_serverid(), SI, shnum) + for (s, SI, shnum) in self._list_incompatible_shares] + d = {"count-shares-needed": self._count_shares_needed, + "count-shares-expected": self._count_shares_expected, + "count-shares-good": self._count_shares_good, + "count-good-share-hosts": self._count_good_share_hosts, + "count-recoverable-versions": self._count_recoverable_versions, + "count-unrecoverable-versions": self._count_unrecoverable_versions, + "servers-responding": responding, + "sharemap": sharemap, + "count-wrong-shares": self._count_wrong_shares, + "list-corrupt-shares": corrupt, + "count-corrupt-shares": self._count_corrupt_shares, + "list-incompatible-shares": incompatible, + "count-incompatible-shares": self._count_incompatible_shares, + } + return d def get_summary(self): - return self.summary + return self._summary def get_report(self): - return self.report + return self._report + def get_share_problems(self): + return self._share_problems def get_servermap(self): - return self.servermap + return self._servermap class CheckAndRepairResults: implements(ICheckAndRepairResults) @@ -148,7 +222,7 @@ self.objects_unrecoverable += 1 self.all_results[tuple(path)] = r 
self.all_results_by_storage_index[r.get_storage_index()] = r - self.corrupt_shares.extend(r.get_data()["list-corrupt-shares"]) + self.corrupt_shares.extend(r.get_corrupt_shares()) def get_counters(self): return {"count-objects-checked": self.objects_checked, @@ -186,7 +260,7 @@ self.objects_unhealthy += 1 if not pre_repair.is_recoverable(): self.objects_unrecoverable += 1 - self.corrupt_shares.extend(pre_repair.get_data()["list-corrupt-shares"]) + self.corrupt_shares.extend(pre_repair.get_corrupt_shares()) if r.get_repair_attempted(): self.repairs_attempted += 1 if r.get_repair_successful(): @@ -201,7 +275,7 @@ self.objects_unrecoverable_post_repair += 1 self.all_results[tuple(path)] = r self.all_results_by_storage_index[r.get_storage_index()] = r - self.corrupt_shares_post_repair.extend(post_repair.get_data()["list-corrupt-shares"]) + self.corrupt_shares_post_repair.extend(post_repair.get_corrupt_shares()) def get_counters(self): return {"count-objects-checked": self.objects_checked, diff -Nru tahoe-lafs-1.9.2/src/allmydata/client.py tahoe-lafs-1.10.0/src/allmydata/client.py --- tahoe-lafs-1.9.2/src/allmydata/client.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/client.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,12 +1,10 @@ import os, stat, time, weakref -from allmydata.interfaces import RIStorageServer from allmydata import node from zope.interface import implements from twisted.internet import reactor, defer from twisted.application import service from twisted.application.internet import TimerService -from foolscap.api import Referenceable from pycryptopp.publickey import rsa import allmydata @@ -16,14 +14,13 @@ from allmydata.immutable.offloaded import Helper from allmydata.control import ControlServer from allmydata.introducer.client import IntroducerClient -from allmydata.util import hashutil, base32, pollmixin, log +from allmydata.util import hashutil, base32, pollmixin, log, keyutil, idlib from allmydata.util.encodingutil import get_filesystem_encoding from allmydata.util.abbreviate import parse_abbreviated_size from allmydata.util.time_format import parse_duration, parse_date from allmydata.stats import StatsProvider from allmydata.history import History -from allmydata.interfaces import IStatsProducer, RIStubClient, \ - SDMF_VERSION, MDMF_VERSION +from allmydata.interfaces import IStatsProducer, SDMF_VERSION, MDMF_VERSION from allmydata.nodemaker import NodeMaker from allmydata.blacklist import Blacklist from allmydata.node import OldConfigOptionError @@ -35,9 +32,6 @@ TiB=1024*GiB PiB=1024*TiB -class StubClient(Referenceable): - implements(RIStubClient) - def _make_secret(): return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n" @@ -140,6 +134,7 @@ self.init_introducer_client() self.init_stats_provider() self.init_secrets() + self.init_node_key() self.init_storage() self.init_control() self.helper = None @@ -169,12 +164,24 @@ if webport: self.init_web(webport) # strports string + def _sequencer(self): + seqnum_s = self.get_config_from_file("announcement-seqnum") + if not seqnum_s: + seqnum_s = "0" + seqnum = int(seqnum_s.strip()) + seqnum += 1 # increment + self.write_config("announcement-seqnum", "%d\n" % seqnum) + nonce = _make_secret().strip() + return seqnum, nonce + def init_introducer_client(self): self.introducer_furl = self.get_config("client", "introducer.furl") ic = IntroducerClient(self.tub, self.introducer_furl, self.nickname, str(allmydata.__full_version__), - str(self.OLDEST_SUPPORTED_VERSION)) + 
str(self.OLDEST_SUPPORTED_VERSION), + self.get_app_versions(), + self._sequencer) self.introducer_client = ic # hold off on starting the IntroducerClient until our tub has been # started, so we'll have a useful address on our RemoteReference, so @@ -203,6 +210,46 @@ self.convergence = base32.a2b(convergence_s) self._secret_holder = SecretHolder(lease_secret, self.convergence) + def init_node_key(self): + # we only create the key once. On all subsequent runs, we re-use the + # existing key + def _make_key(): + sk_vs,vk_vs = keyutil.make_keypair() + return sk_vs+"\n" + sk_vs = self.get_or_create_private_config("node.privkey", _make_key) + sk,vk_vs = keyutil.parse_privkey(sk_vs.strip()) + self.write_config("node.pubkey", vk_vs+"\n") + self._node_key = sk + + def get_long_nodeid(self): + # this matches what IServer.get_longname() says about us elsewhere + vk_bytes = self._node_key.get_verifying_key_bytes() + return "v0-"+base32.b2a(vk_bytes) + + def get_long_tubid(self): + return idlib.nodeid_b2a(self.nodeid) + + def _init_permutation_seed(self, ss): + seed = self.get_config_from_file("permutation-seed") + if not seed: + have_shares = ss.have_shares() + if have_shares: + # if the server has shares but not a recorded + # permutation-seed, then it has been around since pre-#466 + # days, and the clients who uploaded those shares used our + # TubID as a permutation-seed. We should keep using that same + # seed to keep the shares in the same place in the permuted + # ring, so those clients don't have to perform excessive + # searches. + seed = base32.b2a(self.nodeid) + else: + # otherwise, we're free to use the more natural seed of our + # pubkey-based serverid + vk_bytes = self._node_key.get_verifying_key_bytes() + seed = base32.b2a(vk_bytes) + self.write_config("permutation-seed", seed+"\n") + return seed.strip() + def init_storage(self): # should we run a storage server (and publish it for others to use)? 
if not self.get_config("storage", "enabled", True, boolean=True): @@ -212,12 +259,12 @@ storedir = os.path.join(self.basedir, self.STOREDIR) data = self.get_config("storage", "reserved_space", None) - reserved = None try: reserved = parse_abbreviated_size(data) except ValueError: log.msg("[storage]reserved_space= contains unparseable value %s" % data) + raise if reserved is None: reserved = 0 discard = self.get_config("storage", "debug_discard", False, @@ -262,14 +309,19 @@ def _publish(res): furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding()) furl = self.tub.registerReference(ss, furlFile=furl_file) - ri_name = RIStorageServer.__remote_name__ - self.introducer_client.publish(furl, "storage", ri_name) + ann = {"anonymous-storage-FURL": furl, + "permutation-seed-base32": self._init_permutation_seed(ss), + } + self.introducer_client.publish("storage", ann, self._node_key) d.addCallback(_publish) d.addErrback(log.err, facility="tahoe.init", level=log.BAD, umid="aLGBKw") def init_client(self): helper_furl = self.get_config("client", "helper.furl", None) + if helper_furl in ("None", ""): + helper_furl = None + DEP = self.DEFAULT_ENCODING_PARAMETERS DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"])) DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"])) @@ -281,7 +333,6 @@ self.terminator.setServiceParent(self) self.add_service(Uploader(helper_furl, self.stats_provider, self.history)) - self.init_stub_client() self.init_blacklist() self.init_nodemaker() @@ -321,20 +372,6 @@ def get_storage_broker(self): return self.storage_broker - def init_stub_client(self): - def _publish(res): - # we publish an empty object so that the introducer can count how - # many clients are connected and see what versions they're - # running. - sc = StubClient() - furl = self.tub.registerReference(sc) - ri_name = RIStubClient.__remote_name__ - self.introducer_client.publish(furl, "stub_client", ri_name) - d = self.when_tub_ready() - d.addCallback(_publish) - d.addErrback(log.err, facility="tahoe.init", - level=log.BAD, umid="OEHq3g") - def init_blacklist(self): fn = os.path.join(self.basedir, "access.blacklist") self.blacklist = Blacklist(fn) diff -Nru tahoe-lafs-1.9.2/src/allmydata/control.py tahoe-lafs-1.10.0/src/allmydata/control.py --- tahoe-lafs-1.9.2/src/allmydata/control.py 2012-05-14 02:07:21.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/control.py 2013-09-03 15:38:27.000000000 +0000 @@ -64,7 +64,7 @@ uploader = self.parent.getServiceNamed("uploader") u = upload.FileName(filename, convergence=convergence) d = uploader.upload(u) - d.addCallback(lambda results: results.uri) + d.addCallback(lambda results: results.get_uri()) return d def remote_download_from_uri_to_file(self, uri, filename): @@ -186,7 +186,7 @@ else: up = upload.FileName(fn, convergence=None) d1 = self.parent.upload(up) - d1.addCallback(lambda results: results.uri) + d1.addCallback(lambda results: results.get_uri()) d1.addCallback(_record_uri, i) d1.addCallback(_upload_one_file, i+1) return d1 diff -Nru tahoe-lafs-1.9.2/src/allmydata/dirnode.py tahoe-lafs-1.10.0/src/allmydata/dirnode.py --- tahoe-lafs-1.9.2/src/allmydata/dirnode.py 2012-05-14 02:07:21.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/dirnode.py 2013-09-03 15:38:27.000000000 +0000 @@ -134,6 +134,7 @@ if entries is None: entries = {} precondition(isinstance(entries, dict), entries) + precondition(overwrite in (True, False, "only-files"), overwrite) # keys of 'entries' may not be normalized. 
self.entries = entries self.overwrite = overwrite @@ -160,7 +161,7 @@ raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8')) if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]): - raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8')) + raise ExistingChildError("child %s already exists as a directory" % quote_output(name, encoding='utf-8')) metadata = children[name][1].copy() metadata = update_metadata(metadata, new_metadata, now) @@ -597,7 +598,8 @@ return defer.fail(NotWriteableError()) d = self._uploader.upload(uploadable) d.addCallback(lambda results: - self._create_and_validate_node(results.uri, None, name)) + self._create_and_validate_node(results.get_uri(), None, + name)) d.addCallback(lambda node: self.set_node(name, node, metadata, overwrite)) return d @@ -641,22 +643,36 @@ def move_child_to(self, current_child_namex, new_parent, new_child_namex=None, overwrite=True): - """I take one of my children and move them to a new parent. The child - is referenced by name. On the new parent, the child will live under - 'new_child_name', which defaults to 'current_child_name'. I return a - Deferred that fires when the operation finishes.""" - + """ + I take one of my child links and move it to a new parent. The child + link is referenced by name. In the new parent, the child link will live + at 'new_child_namex', which defaults to 'current_child_namex'. I return + a Deferred that fires when the operation finishes. + 'new_child_namex' and 'current_child_namex' need not be normalized. + + The overwrite parameter may be True (overwrite any existing child), + False (error if the new child link already exists), or "only-files" + (error if the new child link exists and points to a directory). 
+ """ if self.is_readonly() or new_parent.is_readonly(): return defer.fail(NotWriteableError()) current_child_name = normalize(current_child_namex) if new_child_namex is None: - new_child_namex = current_child_name - d = self.get(current_child_name) - def sn(child): - return new_parent.set_node(new_child_namex, child, + new_child_name = current_child_name + else: + new_child_name = normalize(new_child_namex) + + from_uri = self.get_write_uri() + if new_parent.get_write_uri() == from_uri and new_child_name == current_child_name: + # needed for correctness, otherwise we would delete the child + return defer.succeed("redundant rename/relink") + + d = self.get_child_and_metadata(current_child_name) + def _got_child( (child, metadata) ): + return new_parent.set_node(new_child_name, child, metadata, overwrite=overwrite) - d.addCallback(sn) + d.addCallback(_got_child) d.addCallback(lambda child: self.delete(current_child_name)) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/frontends/sftpd.py tahoe-lafs-1.10.0/src/allmydata/frontends/sftpd.py --- tahoe-lafs-1.9.2/src/allmydata/frontends/sftpd.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/frontends/sftpd.py 2013-09-03 15:38:27.000000000 +0000 @@ -22,10 +22,11 @@ from twisted.internet.interfaces import ITransport from twisted.internet import defer -from twisted.internet.interfaces import IFinishableConsumer +from twisted.internet.interfaces import IConsumer from foolscap.api import eventually from allmydata.util import deferredutil +from allmydata.util.assertutil import _assert, precondition from allmydata.util.consumer import download_to_data from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \ NoSuchChildError, ChildOfWrongTypeError @@ -233,7 +234,7 @@ if childnode and size is None: size = childnode.get_size() if size is not None: - assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size) + _assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size) attrs['size'] = size perms = S_IFREG | 0666 @@ -277,7 +278,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None): - assert isinstance(childname, (unicode, NoneType)), childname + precondition(isinstance(childname, (unicode, NoneType)), childname=childname) if childname is None: filenode_or_parent = filenode @@ -293,7 +294,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): - implements(IFinishableConsumer) + implements(IConsumer) """I act both as a consumer for the download of the original file contents, and as a wrapper for a temporary file that records the downloaded data and any overwrites. 
I use a priority queue to keep track of which regions of the file have been overwritten @@ -320,12 +321,9 @@ self.milestones = [] # empty heap of (offset, d) self.overwrites = [] # empty heap of (start, end) self.is_closed = False - self.done = self.when_reached(download_size) # adds a milestone - self.is_done = False - def _signal_done(ign): - if noisy: self.log("DONE", level=NOISY) - self.is_done = True - self.done.addCallback(_signal_done) + + self.done = defer.Deferred() + self.done_status = None # None -> not complete, Failure -> download failed, str -> download succeeded self.producer = None def get_file(self): @@ -348,7 +346,7 @@ self.download_size = size if self.downloaded >= self.download_size: - self.finish() + self.download_done("size changed") def registerProducer(self, p, streaming): if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY) @@ -361,7 +359,7 @@ p.resumeProducing() else: def _iterate(): - if not self.is_done: + if self.done_status is None: p.resumeProducing() eventually(_iterate) _iterate() @@ -428,13 +426,17 @@ return if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY) heapq.heappop(self.milestones) - eventually(d.callback, None) + eventually_callback(d)("reached") if milestone >= self.download_size: - self.finish() + self.download_done("reached download size") def overwrite(self, offset, data): if noisy: self.log(".overwrite(%r, )" % (offset, len(data)), level=NOISY) + if self.is_closed: + self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD) + raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") + if offset > self.current_size: # Normally writing at an offset beyond the current end-of-file # would leave a hole that appears filled with zeroes. However, an @@ -462,6 +464,9 @@ The caller must perform no more overwrites until the Deferred has fired.""" if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY) + if self.is_closed: + self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD) + raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") # Note that the overwrite method is synchronous. When a write request is processed # (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will @@ -477,47 +482,68 @@ if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY) needed = min(offset + length, self.download_size) - d = self.when_reached(needed) - def _reached(ign): + + # If we fail to reach the needed number of bytes, the read request will fail. + d = self.when_reached_or_failed(needed) + def _reached_in_read(res): # It is not necessarily the case that self.downloaded >= needed, because # the file might have been truncated (thus truncating the download) and # then extended. 
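The read()/when_reached_or_failed() rework in this hunk is easier to follow against the underlying milestone pattern. Below is a minimal, self-contained sketch of that pattern, simplified from these hunks; it is an illustration, not Tahoe's actual class (the real one also flushes pending milestones with the final download status, as the next hunk shows).

import heapq
import itertools

from twisted.internet import defer

class MilestoneTracker(object):
    """Simplified stand-in for OverwriteableFileConsumer's milestone heap."""
    def __init__(self):
        self.downloaded = 0
        self._seq = itertools.count()  # tie-breaker: Deferreds are not orderable
        self.milestones = []           # heap of (offset, seq, Deferred)

    def when_reached(self, offset):
        # fires once at least 'offset' bytes have been downloaded
        if offset <= self.downloaded:
            return defer.succeed("already reached")
        d = defer.Deferred()
        heapq.heappush(self.milestones, (offset, next(self._seq), d))
        return d

    def data_arrived(self, nbytes):
        self.downloaded += nbytes
        while self.milestones and self.milestones[0][0] <= self.downloaded:
            (_offset, _seq, d) = heapq.heappop(self.milestones)
            d.callback("reached")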
- assert self.current_size >= offset + length, (self.current_size, offset, length) - if noisy: self.log("self.f = %r" % (self.f,), level=NOISY) + _assert(self.current_size >= offset + length, + current_size=self.current_size, offset=offset, length=length) + if noisy: self.log("_reached_in_read(%r), self.f = %r" % (res, self.f,), level=NOISY) self.f.seek(offset) return self.f.read(length) - d.addCallback(_reached) + d.addCallback(_reached_in_read) return d - def when_reached(self, index): - if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY) - if index <= self.downloaded: # already reached - if noisy: self.log("already reached %r" % (index,), level=NOISY) - return defer.succeed(None) + def when_reached_or_failed(self, index): + if noisy: self.log(".when_reached_or_failed(%r)" % (index,), level=NOISY) + def _reached(res): + if noisy: self.log("reached %r with result %r" % (index, res), level=NOISY) + return res + + if self.done_status is not None: + return defer.execute(_reached, self.done_status) + if index <= self.downloaded: # already reached successfully + if noisy: self.log("already reached %r successfully" % (index,), level=NOISY) + return defer.succeed("already reached successfully") d = defer.Deferred() - def _reached(ign): - if noisy: self.log("reached %r" % (index,), level=NOISY) - return ign d.addCallback(_reached) heapq.heappush(self.milestones, (index, d)) return d def when_done(self): - return self.done + d = defer.Deferred() + self.done.addCallback(lambda ign: eventually_callback(d)(self.done_status)) + return d - def finish(self): - """Called by the producer when it has finished producing, or when we have - received enough bytes, or as a result of a close. Defined by IFinishableConsumer.""" + def download_done(self, res): + _assert(isinstance(res, (str, Failure)), res=res) + # Only the first call to download_done counts, but we log subsequent calls + # (multiple calls are normal). + if self.done_status is not None: + self.log("IGNORING extra call to download_done with result %r; previous result was %r" + % (res, self.done_status), level=OPERATIONAL) + return + + self.log("DONE with result %r" % (res,), level=OPERATIONAL) + + # We avoid errbacking self.done so that we are not left with an 'Unhandled error in Deferred' + # in case when_done() is never called. Instead we stash the failure in self.done_status, + # from where the callback added in when_done() can retrieve it. + self.done_status = res + eventually_callback(self.done)(None) while len(self.milestones) > 0: (next, d) = self.milestones[0] - if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY) + if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY) heapq.heappop(self.milestones) # The callback means that the milestone has been reached if # it is ever going to be. Note that the file may have been # truncated to before the milestone. - eventually(d.callback, None) + eventually_callback(d)(res) def close(self): if not self.is_closed: @@ -526,10 +552,14 @@ self.f.close() except Exception, e: self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD) - self.finish() + self.download_done("closed") + return self.done_status def unregisterProducer(self): - pass + # This will happen just before our client calls download_done, which will tell + # us the outcome of the download; we don't know the outcome at this point. 
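These hunks consistently replace direct `eventually(d.callback, x)` calls with `eventually_callback(d)(x)`. The helpers are defined earlier in sftpd.py; their probable shape, inferred from the call sites here (an assumption, not a quote of the source):

from foolscap.api import eventually

def eventually_callback(d):
    # returns a one-argument function that fires d on a later reactor turn,
    # avoiding re-entrant callback chains
    return lambda res: eventually(d.callback, res)

def eventually_errback(d):
    return lambda err: eventually(d.errback, err)

Note that firing a Deferred's callback with a Failure runs its errback chain, which is why a single `addBoth(eventually_callback(d))` suffices in the readChunk hunk below.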
+ self.producer = None + self.log("producer unregistered", level=NOISY) SIZE_THRESHOLD = 1000 @@ -546,7 +576,8 @@ PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath) if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY) - assert isinstance(userpath, str) and IFileNode.providedBy(filenode), (userpath, filenode) + precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode), + userpath=userpath, filenode=filenode) self.filenode = filenode self.metadata = metadata self.async = download_to_data(filenode) @@ -574,9 +605,9 @@ # i.e. we respond with an EOF error iff offset is already at EOF. if offset >= len(data): - eventually(d.errback, SFTPError(FX_EOF, "read at or past end of file")) + eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file"))) else: - eventually(d.callback, data[offset:offset+length]) # truncated if offset+length > len(data) + eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data) return data self.async.addCallbacks(_read, eventually_errback(d)) d.addBoth(_convert_error, request) @@ -629,7 +660,7 @@ if noisy: self.log(".__init__(%r, %r = %r, %r, )" % (userpath, flags, _repr_flags(flags), close_notify), level=NOISY) - assert isinstance(userpath, str), userpath + precondition(isinstance(userpath, str), userpath=userpath) self.userpath = userpath self.flags = flags self.close_notify = close_notify @@ -652,7 +683,10 @@ self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" % (parent, childname, filenode, metadata), level=OPERATIONAL) - assert isinstance(childname, (unicode, NoneType)), childname + precondition(isinstance(childname, (unicode, NoneType)), childname=childname) + precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode) + precondition(not self.closed, sftpfile=self) + # If the file has been renamed, the new (parent, childname) takes precedence. if self.parent is None: self.parent = parent @@ -661,29 +695,32 @@ self.filenode = filenode self.metadata = metadata - assert not self.closed, self tempfile_maker = EncryptedTemporaryFile if (self.flags & FXF_TRUNC) or not filenode: # We're either truncating or creating the file, so we don't need the old contents. self.consumer = OverwriteableFileConsumer(0, tempfile_maker) - self.consumer.finish() + self.consumer.download_done("download not needed") else: - assert IFileNode.providedBy(filenode), filenode - self.async.addCallback(lambda ignored: filenode.get_best_readable_version()) def _read(version): if noisy: self.log("_read", level=NOISY) download_size = version.get_size() - assert download_size is not None + _assert(download_size is not None) self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker) - version.read(self.consumer, 0, None) + d = version.read(self.consumer, 0, None) + def _finished(res): + if not isinstance(res, Failure): + res = "download finished" + self.consumer.download_done(res) + d.addBoth(_finished) + # It is correct to drop d here. 
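A recurring change throughout this file: bare `assert` statements become `precondition()`/`_assert()` calls from allmydata.util.assertutil. These are ordinary function calls, so they still execute under `python -O` (which strips bare asserts), and the keyword arguments are carried into the failure message. A minimal usage sketch (the exact message format is an assumption):

from allmydata.util.assertutil import precondition

def remember_userpath(userpath):
    # on failure, raises AssertionError whose message includes userpath=...;
    # unlike a bare assert, this is not stripped by python -O
    precondition(isinstance(userpath, str), userpath=userpath)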
self.async.addCallback(_read) - eventually(self.async.callback, None) + eventually_callback(self.async)(None) if noisy: self.log("open done", level=NOISY) return self @@ -697,7 +734,8 @@ def rename(self, new_userpath, new_parent, new_childname): self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL) - assert isinstance(new_userpath, str) and isinstance(new_childname, unicode), (new_userpath, new_childname) + precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode), + new_userpath=new_userpath, new_childname=new_childname) self.userpath = new_userpath self.parent = new_parent self.childname = new_childname @@ -735,7 +773,7 @@ def _read(ign): if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY) d2 = self.consumer.read(offset, length) - d2.addCallbacks(eventually_callback(d), eventually_errback(d)) + d2.addBoth(eventually_callback(d)) # It is correct to drop d2 here. return None self.async.addCallbacks(_read, eventually_errback(d)) @@ -779,6 +817,24 @@ # don't addErrback to self.async, just allow subsequent async ops to fail. return defer.succeed(None) + def _do_close(self, res, d=None): + if noisy: self.log("_do_close(%r)" % (res,), level=NOISY) + status = None + if self.consumer: + status = self.consumer.close() + + # We must close_notify before re-firing self.async. + if self.close_notify: + self.close_notify(self.userpath, self.parent, self.childname, self) + + if not isinstance(res, Failure) and isinstance(status, Failure): + res = status + + if d: + eventually_callback(d)(res) + elif isinstance(res, Failure): + self.log("suppressing %r" % (res,), level=OPERATIONAL) + def close(self): request = ".close()" self.log(request, level=OPERATIONAL) @@ -790,10 +846,14 @@ self.closed = True if not (self.flags & (FXF_WRITE | FXF_CREAT)): - def _readonly_close(): - if self.consumer: - self.consumer.close() - return defer.execute(_readonly_close) + # We never fail a close of a handle opened only for reading, even if the file + # failed to download. (We could not do so deterministically, because it would + # depend on whether we reached the point of failure before abandoning the + # download.) Any reads that depended on file content that could not be downloaded + # will have failed. It is important that we don't close the consumer until + # previous read operations have completed. + self.async.addBoth(self._do_close) + return defer.succeed(None) # We must capture the abandoned, parent, and childname variables synchronously # at the close call. This is needed by the correctness arguments in the comments @@ -807,22 +867,13 @@ # it is correct to optimize out the commit if it is False at the close call. has_changed = self.has_changed - def _committed(res): - if noisy: self.log("_committed(%r)" % (res,), level=NOISY) - - self.consumer.close() - - # We must close_notify before re-firing self.async. 
- if self.close_notify: - self.close_notify(self.userpath, self.parent, self.childname, self) - return res - - def _close(ign): + def _commit(ign): d2 = self.consumer.when_done() if self.filenode and self.filenode.is_mutable(): - self.log("update mutable file %r childname=%r metadata=%r" % (self.filenode, childname, self.metadata), level=OPERATIONAL) + self.log("update mutable file %r childname=%r metadata=%r" + % (self.filenode, childname, self.metadata), level=OPERATIONAL) if self.metadata.get('no-write', False) and not self.filenode.is_readonly(): - assert parent and childname, (parent, childname, self.metadata) + _assert(parent and childname, parent=parent, childname=childname, metadata=self.metadata) d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata)) d2.addCallback(lambda ign: self.filenode.overwrite(MutableFileHandle(self.consumer.get_file()))) @@ -832,22 +883,19 @@ u = FileHandle(self.consumer.get_file(), self.convergence) return parent.add_file(childname, u, metadata=self.metadata) d2.addCallback(_add_file) - - d2.addBoth(_committed) return d2 - d = defer.Deferred() - # If the file has been abandoned, we don't want the close operation to get "stuck", - # even if self.async fails to re-fire. Doing the close independently of self.async - # in that case ensures that dropping an ssh connection is sufficient to abandon + # even if self.async fails to re-fire. Completing the close independently of self.async + # in that case should ensure that dropping an ssh connection is sufficient to abandon # any heisenfiles that were not explicitly closed in that connection. if abandoned or not has_changed: - d.addCallback(_committed) + d = defer.succeed(None) + self.async.addBoth(self._do_close) else: - self.async.addCallback(_close) - - self.async.addCallbacks(eventually_callback(d), eventually_errback(d)) + d = defer.Deferred() + self.async.addCallback(_commit) + self.async.addBoth(self._do_close, d) d.addBoth(_convert_error, request) return d @@ -869,7 +917,7 @@ # self.filenode might be None, but that's ok. attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size()) - eventually(d.callback, attrs) + eventually_callback(d)(attrs) return None self.async.addCallbacks(_get, eventually_errback(d)) d.addBoth(_convert_error, request) @@ -907,7 +955,7 @@ # TODO: should we refuse to truncate a file opened with FXF_APPEND? # self.consumer.set_current_size(size) - eventually(d.callback, None) + eventually_callback(d)(None) return None self.async.addCallbacks(_set, eventually_errback(d)) d.addBoth(_convert_error, request) @@ -1002,7 +1050,7 @@ request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry) self.log(request, level=OPERATIONAL) - assert isinstance(userpath, str), userpath + precondition(isinstance(userpath, str), userpath=userpath) # First we synchronously mark all heisenfiles matching the userpath or direntry # as abandoned, and remove them from the two heisenfile dicts. 
Then we .sync() @@ -1051,9 +1099,9 @@ (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite)) self.log(request, level=OPERATIONAL) - assert (isinstance(from_userpath, str) and isinstance(from_childname, unicode) and - isinstance(to_userpath, str) and isinstance(to_childname, unicode)), \ - (from_userpath, from_childname, to_userpath, to_childname) + precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and + isinstance(to_userpath, str) and isinstance(to_childname, unicode)), + from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname) if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY) @@ -1124,7 +1172,8 @@ request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs) self.log(request, level=OPERATIONAL) - assert isinstance(userpath, str) and isinstance(direntry, str), (userpath, direntry) + _assert(isinstance(userpath, str) and isinstance(direntry, str), + userpath=userpath, direntry=direntry) files = [] if direntry in all_heisenfiles: @@ -1156,7 +1205,8 @@ request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore) self.log(request, level=OPERATIONAL) - assert isinstance(userpath, str) and isinstance(direntry, (str, NoneType)), (userpath, direntry) + _assert(isinstance(userpath, str) and isinstance(direntry, (str, NoneType)), + userpath=userpath, direntry=direntry) files = [] if direntry in all_heisenfiles: @@ -1180,7 +1230,8 @@ def _remove_heisenfile(self, userpath, parent, childname, file_to_remove): if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY) - assert isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)), (userpath, childname) + _assert(isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)), + userpath=userpath, childname=childname) direntry = _direntry_for(parent, childname) if direntry in all_heisenfiles: @@ -1206,8 +1257,9 @@ (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata), level=NOISY) - assert (isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and - (metadata is None or 'no-write' in metadata)), (userpath, childname, metadata) + _assert((isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and + (metadata is None or 'no-write' in metadata)), + userpath=userpath, childname=childname, metadata=metadata) writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0 direntry = _direntry_for(parent, childname, filenode) @@ -1649,7 +1701,7 @@ d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname])) def _got( (child, metadata) ): if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY) - assert IDirectoryNode.providedBy(parent), parent + _assert(IDirectoryNode.providedBy(parent), parent=parent) metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata) d3 = child.get_current_size() d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size)) @@ -1789,7 +1841,7 @@ def _path_from_string(self, pathstring): if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY) - assert isinstance(pathstring, str), pathstring + _assert(isinstance(pathstring, str), pathstring=pathstring) # The home directory is the root directory. 
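The preconditions in the hunks above all guard the "heisenfile" registry: handles that remain open while their directory entry may be renamed or removed out from under them. A hedged sketch of the keying scheme, modelled on _direntry_for's call sites in these hunks (the exact shape below is an assumption):

all_heisenfiles = {}  # {direntry: list of open GeneralSFTPFile handles}

def _direntry_for(parent, childname):
    # key a child by its parent directory's write-URI plus the (unicode)
    # child name, so a rename can re-home any open handles for that entry
    if parent is None:
        return None
    rw_uri = parent.get_write_uri()
    if rw_uri is None or childname is None:
        return rw_uri
    return rw_uri + "/" + childname.encode('utf-8')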
pathstring = pathstring.strip("/") @@ -1918,7 +1970,7 @@ self._client = client def requestAvatar(self, avatarID, mind, interface): - assert interface == IConchUser, interface + _assert(interface == IConchUser, interface=interface) rootnode = self._client.create_node_from_uri(avatarID.rootcap) handler = SFTPUserHandler(self._client, rootnode, avatarID.username) return (interface, handler, handler.logout) diff -Nru tahoe-lafs-1.9.2/src/allmydata/immutable/checker.py tahoe-lafs-1.10.0/src/allmydata/immutable/checker.py --- tahoe-lafs-1.9.2/src/allmydata/immutable/checker.py 2012-07-01 23:00:50.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/immutable/checker.py 2013-09-03 15:38:27.000000000 +0000 @@ -738,68 +738,69 @@ def _format_results(self, results): SI = self._verifycap.get_storage_index() - cr = CheckResults(self._verifycap, SI) - d = {} - d['count-shares-needed'] = self._verifycap.needed_shares - d['count-shares-expected'] = self._verifycap.total_shares - - verifiedshares = dictutil.DictOfSets() # {sharenum: set(serverid)} - servers = {} # {serverid: set(sharenums)} - corruptshare_locators = [] # (serverid, storageindex, sharenum) - incompatibleshare_locators = [] # (serverid, storageindex, sharenum) - servers_responding = set() # serverid + + verifiedshares = dictutil.DictOfSets() # {sharenum: set(server)} + servers = {} # {server: set(sharenums)} + corruptshare_locators = [] # (server, storageindex, sharenum) + incompatibleshare_locators = [] # (server, storageindex, sharenum) + servers_responding = set() # server for verified, server, corrupt, incompatible, responded in results: - server_id = server.get_serverid() - servers.setdefault(server_id, set()).update(verified) + servers.setdefault(server, set()).update(verified) for sharenum in verified: - verifiedshares.setdefault(sharenum, set()).add(server_id) + verifiedshares.setdefault(sharenum, set()).add(server) for sharenum in corrupt: - corruptshare_locators.append((server_id, SI, sharenum)) + corruptshare_locators.append((server, SI, sharenum)) for sharenum in incompatible: - incompatibleshare_locators.append((server_id, SI, sharenum)) + incompatibleshare_locators.append((server, SI, sharenum)) if responded: - servers_responding.add(server_id) + servers_responding.add(server) - d['count-shares-good'] = len(verifiedshares) - d['count-good-share-hosts'] = len([s for s in servers.keys() if servers[s]]) + good_share_hosts = len([s for s in servers.keys() if servers[s]]) assert len(verifiedshares) <= self._verifycap.total_shares, (verifiedshares.keys(), self._verifycap.total_shares) if len(verifiedshares) == self._verifycap.total_shares: - cr.set_healthy(True) - cr.set_summary("Healthy") + healthy = True + summary = "Healthy" else: - cr.set_healthy(False) - cr.set_summary("Not Healthy: %d shares (enc %d-of-%d)" % - (len(verifiedshares), - self._verifycap.needed_shares, - self._verifycap.total_shares)) + healthy = False + summary = ("Not Healthy: %d shares (enc %d-of-%d)" % + (len(verifiedshares), + self._verifycap.needed_shares, + self._verifycap.total_shares)) if len(verifiedshares) >= self._verifycap.needed_shares: - cr.set_recoverable(True) - d['count-recoverable-versions'] = 1 - d['count-unrecoverable-versions'] = 0 + recoverable = 1 + unrecoverable = 0 else: - cr.set_recoverable(False) - d['count-recoverable-versions'] = 0 - d['count-unrecoverable-versions'] = 1 - - d['servers-responding'] = list(servers_responding) - d['sharemap'] = verifiedshares - # no such thing as wrong shares of an immutable file - d['count-wrong-shares'] 
= 0 - d['list-corrupt-shares'] = corruptshare_locators - d['count-corrupt-shares'] = len(corruptshare_locators) - d['list-incompatible-shares'] = incompatibleshare_locators - d['count-incompatible-shares'] = len(incompatibleshare_locators) - + recoverable = 0 + unrecoverable = 1 # The file needs rebalancing if the set of servers that have at least # one share is less than the number of uniquely-numbered shares # available. # TODO: this may be wrong, see ticket #1115 comment:27 and ticket #1784. - cr.set_needs_rebalancing(d['count-good-share-hosts'] < d['count-shares-good']) + needs_rebalancing = bool(good_share_hosts < len(verifiedshares)) - cr.set_data(d) + cr = CheckResults(self._verifycap, SI, + healthy=healthy, recoverable=bool(recoverable), + needs_rebalancing=needs_rebalancing, + count_shares_needed=self._verifycap.needed_shares, + count_shares_expected=self._verifycap.total_shares, + count_shares_good=len(verifiedshares), + count_good_share_hosts=good_share_hosts, + count_recoverable_versions=recoverable, + count_unrecoverable_versions=unrecoverable, + servers_responding=list(servers_responding), + sharemap=verifiedshares, + count_wrong_shares=0, # no such thing, for immutable + list_corrupt_shares=corruptshare_locators, + count_corrupt_shares=len(corruptshare_locators), + list_incompatible_shares=incompatibleshare_locators, + count_incompatible_shares=len(incompatibleshare_locators), + summary=summary, + report=[], + share_problems=[], + servermap=None) return cr diff -Nru tahoe-lafs-1.9.2/src/allmydata/immutable/encode.py tahoe-lafs-1.10.0/src/allmydata/immutable/encode.py --- tahoe-lafs-1.9.2/src/allmydata/immutable/encode.py 2012-05-14 02:07:22.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/immutable/encode.py 2013-09-03 15:38:27.000000000 +0000 @@ -678,3 +678,5 @@ def get_uri_extension_data(self): return self.uri_extension_data + def get_uri_extension_hash(self): + return self.uri_extension_hash diff -Nru tahoe-lafs-1.9.2/src/allmydata/immutable/filenode.py tahoe-lafs-1.10.0/src/allmydata/immutable/filenode.py --- tahoe-lafs-1.9.2/src/allmydata/immutable/filenode.py 2012-07-01 23:00:41.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/immutable/filenode.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,5 @@ import binascii -import copy import time now = time.time from zope.interface import implements @@ -87,71 +86,91 @@ def raise_error(self): pass + def is_mutable(self): + return False def check_and_repair(self, monitor, verify=False, add_lease=False): - verifycap = self._verifycap - storage_index = verifycap.storage_index - sb = self._storage_broker - servers = sb.get_connected_servers() - sh = self._secret_holder - - c = Checker(verifycap=verifycap, servers=servers, - verify=verify, add_lease=add_lease, secret_holder=sh, + c = Checker(verifycap=self._verifycap, + servers=self._storage_broker.get_connected_servers(), + verify=verify, add_lease=add_lease, + secret_holder=self._secret_holder, monitor=monitor) d = c.start() - def _maybe_repair(cr): - crr = CheckAndRepairResults(storage_index) - crr.pre_repair_results = cr - if cr.is_healthy(): - crr.post_repair_results = cr - return defer.succeed(crr) - else: - crr.repair_attempted = True - crr.repair_successful = False # until proven successful - def _gather_repair_results(ur): - assert IUploadResults.providedBy(ur), ur - # clone the cr (check results) to form the basis of the - # prr (post-repair results) - prr = CheckResults(cr.uri, cr.storage_index) - prr.data = copy.deepcopy(cr.data) - - sm = prr.data['sharemap'] - 
assert isinstance(sm, DictOfSets), sm - sm.update(ur.sharemap) - servers_responding = set(prr.data['servers-responding']) - for shnum, serverids in ur.sharemap.items(): - servers_responding.update(serverids) - servers_responding = sorted(servers_responding) - prr.data['servers-responding'] = servers_responding - prr.data['count-shares-good'] = len(sm) - good_hosts = len(reduce(set.union, sm.itervalues(), set())) - prr.data['count-good-share-hosts'] = good_hosts - is_healthy = bool(len(sm) >= verifycap.total_shares) - is_recoverable = bool(len(sm) >= verifycap.needed_shares) - prr.set_healthy(is_healthy) - prr.set_recoverable(is_recoverable) - crr.repair_successful = is_healthy - - # TODO: this may be wrong, see ticket #1115 comment:27 and ticket #1784. - prr.set_needs_rebalancing(len(sm) >= verifycap.total_shares) - - crr.post_repair_results = prr - return crr - def _repair_error(f): - # as with mutable repair, I'm not sure if I want to pass - # through a failure or not. TODO - crr.repair_successful = False - crr.repair_failure = f - return f - r = Repairer(self, storage_broker=sb, secret_holder=sh, - monitor=monitor) - d = r.start() - d.addCallbacks(_gather_repair_results, _repair_error) - return d + d.addCallback(self._maybe_repair, monitor) + return d - d.addCallback(_maybe_repair) + def _maybe_repair(self, cr, monitor): + crr = CheckAndRepairResults(self._verifycap.storage_index) + crr.pre_repair_results = cr + if cr.is_healthy(): + crr.post_repair_results = cr + return defer.succeed(crr) + + crr.repair_attempted = True + crr.repair_successful = False # until proven successful + def _repair_error(f): + # as with mutable repair, I'm not sure if I want to pass + # through a failure or not. TODO + crr.repair_successful = False + crr.repair_failure = f + return f + r = Repairer(self, storage_broker=self._storage_broker, + secret_holder=self._secret_holder, + monitor=monitor) + d = r.start() + d.addCallbacks(self._gather_repair_results, _repair_error, + callbackArgs=(cr, crr,)) return d + def _gather_repair_results(self, ur, cr, crr): + assert IUploadResults.providedBy(ur), ur + # clone the cr (check results) to form the basis of the + # prr (post-repair results) + + verifycap = self._verifycap + servers_responding = set(cr.get_servers_responding()) + sm = DictOfSets() + assert isinstance(cr.get_sharemap(), DictOfSets) + for shnum, servers in cr.get_sharemap().items(): + for server in servers: + sm.add(shnum, server) + for shnum, servers in ur.get_sharemap().items(): + for server in servers: + sm.add(shnum, server) + servers_responding.add(server) + servers_responding = sorted(servers_responding) + + good_hosts = len(reduce(set.union, sm.values(), set())) + is_healthy = bool(len(sm) >= verifycap.total_shares) + is_recoverable = bool(len(sm) >= verifycap.needed_shares) + + # TODO: this may be wrong, see ticket #1115 comment:27 and ticket #1784. 
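Both this repairer hunk and the checker hunk above make the same API change: instead of instantiating CheckResults and then mutating it through setters and a `data` dict, every field is computed first and passed to the constructor in a single call. A toy version of the resulting object, to show the shape (not the real class):

class ToyCheckResults(object):
    # all fields arrive via the constructor; afterwards the object is
    # read-only, so results can never be observed half-built
    def __init__(self, healthy, recoverable, count_shares_good, summary):
        self._healthy = healthy
        self._recoverable = recoverable
        self._count_shares_good = count_shares_good
        self._summary = summary

    def is_healthy(self):
        return self._healthy
    def is_recoverable(self):
        return self._recoverable
    def get_count_shares_good(self):
        return self._count_shares_good
    def get_summary(self):
        return self._summary

cr = ToyCheckResults(healthy=False, recoverable=True,
                     count_shares_good=7,
                     summary="Not Healthy: 7 shares (enc 3-of-10)")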
+ needs_rebalancing = bool(len(sm) >= verifycap.total_shares) + + prr = CheckResults(cr.get_uri(), cr.get_storage_index(), + healthy=is_healthy, recoverable=is_recoverable, + needs_rebalancing=needs_rebalancing, + count_shares_needed=verifycap.needed_shares, + count_shares_expected=verifycap.total_shares, + count_shares_good=len(sm), + count_good_share_hosts=good_hosts, + count_recoverable_versions=int(is_recoverable), + count_unrecoverable_versions=int(not is_recoverable), + servers_responding=list(servers_responding), + sharemap=sm, + count_wrong_shares=0, # no such thing as wrong, for immutable + list_corrupt_shares=cr.get_corrupt_shares(), + count_corrupt_shares=len(cr.get_corrupt_shares()), + list_incompatible_shares=cr.get_incompatible_shares(), + count_incompatible_shares=len(cr.get_incompatible_shares()), + summary="", + report=[], + share_problems=[], + servermap=None) + crr.repair_successful = is_healthy + crr.post_repair_results = prr + return crr + def check(self, monitor, verify=False, add_lease=False): verifycap = self._verifycap sb = self._storage_broker diff -Nru tahoe-lafs-1.9.2/src/allmydata/immutable/offloaded.py tahoe-lafs-1.10.0/src/allmydata/immutable/offloaded.py --- tahoe-lafs-1.9.2/src/allmydata/immutable/offloaded.py 2012-06-13 17:03:08.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/immutable/offloaded.py 2013-09-03 15:38:27.000000000 +0000 @@ -137,14 +137,13 @@ def __init__(self, storage_index, helper, storage_broker, secret_holder, incoming_file, encoding_file, - results, log_number): + log_number): self._storage_index = storage_index self._helper = helper self._incoming_file = incoming_file self._encoding_file = encoding_file self._upload_id = si_b2a(storage_index)[:5] self._log_number = log_number - self._results = results self._upload_status = upload.UploadStatus() self._upload_status.set_helper(False) self._upload_status.set_storage_index(storage_index) @@ -160,6 +159,7 @@ self._reader = LocalCiphertextReader(self, storage_index, encoding_file) self._finished_observers = observer.OneShotObserverList() + self._started = time.time() d = self._fetcher.when_done() d.addCallback(lambda res: self._reader.start()) d.addCallback(lambda res: self.start_encrypted(self._reader)) @@ -171,31 +171,26 @@ kwargs['facility'] = "tahoe.helper.chk" return upload.CHKUploader.log(self, *args, **kwargs) - def start(self): - self._started = time.time() - # determine if we need to upload the file. If so, return ({},self) . - # If not, return (UploadResults,None) . + def remote_get_version(self): + return self.VERSION + + def remote_upload(self, reader): + # reader is an RIEncryptedUploadable. I am specified to return an + # UploadResults dictionary. + + # Log how much ciphertext we need to get. self.log("deciding whether to upload the file or not", level=log.NOISY) if os.path.exists(self._encoding_file): # we have the whole file, and we might be encoding it (or the # encode/upload might have failed, and we need to restart it). self.log("ciphertext already in place", level=log.UNUSUAL) - return (self._results, self) - if os.path.exists(self._incoming_file): + elif os.path.exists(self._incoming_file): # we have some of the file, but not all of it (otherwise we'd be # encoding). The caller might be useful. 
self.log("partial ciphertext already present", level=log.UNUSUAL) - return (self._results, self) - # we don't remember uploading this file - self.log("no ciphertext yet", level=log.NOISY) - return (self._results, self) - - def remote_get_version(self): - return self.VERSION - - def remote_upload(self, reader): - # reader is an RIEncryptedUploadable. I am specified to return an - # UploadResults dictionary. + else: + # we don't remember uploading this file + self.log("no ciphertext yet", level=log.NOISY) # let our fetcher pull ciphertext from the reader. self._fetcher.add_reader(reader) @@ -205,19 +200,38 @@ # and inform the client when the upload has finished return self._finished_observers.when_fired() - def _finished(self, uploadresults): - precondition(isinstance(uploadresults.verifycapstr, str), uploadresults.verifycapstr) - assert interfaces.IUploadResults.providedBy(uploadresults), uploadresults - r = uploadresults - v = uri.from_string(r.verifycapstr) - r.uri_extension_hash = v.uri_extension_hash + def _finished(self, ur): + assert interfaces.IUploadResults.providedBy(ur), ur + vcapstr = ur.get_verifycapstr() + precondition(isinstance(vcapstr, str), vcapstr) + v = uri.from_string(vcapstr) f_times = self._fetcher.get_times() - r.timings["cumulative_fetch"] = f_times["cumulative_fetch"] - r.ciphertext_fetched = self._fetcher.get_ciphertext_fetched() - r.timings["total_fetch"] = f_times["total"] + + hur = upload.HelperUploadResults() + hur.timings = {"cumulative_fetch": f_times["cumulative_fetch"], + "total_fetch": f_times["total"], + } + for key,val in ur.get_timings().items(): + hur.timings[key] = val + hur.uri_extension_hash = v.uri_extension_hash + hur.ciphertext_fetched = self._fetcher.get_ciphertext_fetched() + hur.preexisting_shares = ur.get_preexisting_shares() + # hur.sharemap needs to be {shnum: set(serverid)} + hur.sharemap = {} + for shnum, servers in ur.get_sharemap().items(): + hur.sharemap[shnum] = set([s.get_serverid() for s in servers]) + # and hur.servermap needs to be {serverid: set(shnum)} + hur.servermap = {} + for server, shnums in ur.get_servermap().items(): + hur.servermap[server.get_serverid()] = set(shnums) + hur.pushed_shares = ur.get_pushed_shares() + hur.file_size = ur.get_file_size() + hur.uri_extension_data = ur.get_uri_extension_data() + hur.verifycapstr = vcapstr + self._reader.close() os.unlink(self._encoding_file) - self._finished_observers.fire(r) + self._finished_observers.fire(hur) self._helper.upload_finished(self._storage_index, v.size) del self._reader @@ -303,14 +317,7 @@ if os.path.exists(self._encoding_file): self.log("ciphertext already present, bypassing fetch", level=log.UNUSUAL) - # we'll still need the plaintext hashes (when - # LocalCiphertextReader.get_plaintext_hashtree_leaves() is - # called), and currently the easiest way to get them is to ask - # the sender for the last byte of ciphertext. That will provoke - # them into reading and hashing (but not sending) everything - # else. 
- have = os.stat(self._encoding_file)[stat.ST_SIZE] - d = self.call("read_encrypted", have-1, 1) + d = defer.succeed(None) else: # first, find out how large the file is going to be d = self.call("get_size") @@ -489,7 +496,6 @@ { }, "application-version": str(allmydata.__full_version__), } - chk_upload_helper_class = CHKUploadHelper MAX_UPLOAD_STATUSES = 10 def __init__(self, basedir, storage_broker, secret_holder, @@ -564,48 +570,15 @@ def remote_upload_chk(self, storage_index): self.count("chk_upload_helper.upload_requests") - r = upload.UploadResults() - started = time.time() - si_s = si_b2a(storage_index) - lp = self.log(format="helper: upload_chk query for SI %(si)s", si=si_s) - incoming_file = os.path.join(self._chk_incoming, si_s) - encoding_file = os.path.join(self._chk_encoding, si_s) + lp = self.log(format="helper: upload_chk query for SI %(si)s", + si=si_b2a(storage_index)) if storage_index in self._active_uploads: self.log("upload is currently active", parent=lp) uh = self._active_uploads[storage_index] - return uh.start() + return (None, uh) - d = self._check_for_chk_already_in_grid(storage_index, r, lp) - def _checked(already_present): - elapsed = time.time() - started - r.timings['existence_check'] = elapsed - if already_present: - # the necessary results are placed in the UploadResults - self.count("chk_upload_helper.upload_already_present") - self.log("file already found in grid", parent=lp) - return (r, None) - - self.count("chk_upload_helper.upload_need_upload") - # the file is not present in the grid, by which we mean there are - # less than 'N' shares available. - self.log("unable to find file in the grid", parent=lp, - level=log.NOISY) - # We need an upload helper. Check our active uploads again in - # case there was a race. - if storage_index in self._active_uploads: - self.log("upload is currently active", parent=lp) - uh = self._active_uploads[storage_index] - else: - self.log("creating new upload helper", parent=lp) - uh = self.chk_upload_helper_class(storage_index, self, - self._storage_broker, - self._secret_holder, - incoming_file, encoding_file, - r, lp) - self._active_uploads[storage_index] = uh - self._add_upload(uh) - return uh.start() - d.addCallback(_checked) + d = self._check_chk(storage_index, lp) + d.addCallback(self._did_chk_check, storage_index, lp) def _err(f): self.log("error while checking for chk-already-in-grid", failure=f, level=log.WEIRD, parent=lp, umid="jDtxZg") @@ -613,7 +586,7 @@ d.addErrback(_err) return d - def _check_for_chk_already_in_grid(self, storage_index, results, lp): + def _check_chk(self, storage_index, lp): # see if this file is already in the grid lp2 = self.log("doing a quick check+UEBfetch", parent=lp, level=log.NOISY) @@ -624,16 +597,52 @@ if res: (sharemap, ueb_data, ueb_hash) = res self.log("found file in grid", level=log.NOISY, parent=lp) - results.uri_extension_hash = ueb_hash - results.sharemap = sharemap - results.uri_extension_data = ueb_data - results.preexisting_shares = len(sharemap) - results.pushed_shares = 0 - return True - return False + hur = upload.HelperUploadResults() + hur.uri_extension_hash = ueb_hash + hur.sharemap = sharemap + hur.uri_extension_data = ueb_data + hur.preexisting_shares = len(sharemap) + hur.pushed_shares = 0 + return hur + return None d.addCallback(_checked) return d + def _did_chk_check(self, already_present, storage_index, lp): + if already_present: + # the necessary results are placed in the UploadResults + self.count("chk_upload_helper.upload_already_present") + self.log("file 
already found in grid", parent=lp) + return (already_present, None) + + self.count("chk_upload_helper.upload_need_upload") + # the file is not present in the grid, by which we mean there are + # less than 'N' shares available. + self.log("unable to find file in the grid", parent=lp, + level=log.NOISY) + # We need an upload helper. Check our active uploads again in + # case there was a race. + if storage_index in self._active_uploads: + self.log("upload is currently active", parent=lp) + uh = self._active_uploads[storage_index] + else: + self.log("creating new upload helper", parent=lp) + uh = self._make_chk_upload_helper(storage_index, lp) + self._active_uploads[storage_index] = uh + self._add_upload(uh) + return (None, uh) + + def _make_chk_upload_helper(self, storage_index, lp): + si_s = si_b2a(storage_index) + incoming_file = os.path.join(self._chk_incoming, si_s) + encoding_file = os.path.join(self._chk_encoding, si_s) + uh = CHKUploadHelper(storage_index, self, + self._storage_broker, + self._secret_holder, + incoming_file, encoding_file, + lp) + return uh + def _add_upload(self, uh): self._all_uploads[uh] = None if self._history: diff -Nru tahoe-lafs-1.9.2/src/allmydata/immutable/upload.py tahoe-lafs-1.10.0/src/allmydata/immutable/upload.py --- tahoe-lafs-1.9.2/src/allmydata/immutable/upload.py 2012-06-21 19:42:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/immutable/upload.py 2013-09-03 15:38:27.000000000 +0000 @@ -16,7 +16,7 @@ from allmydata.util.happinessutil import servers_of_happiness, \ shares_by_server, merge_servers, \ failure_message -from allmydata.util.assertutil import precondition +from allmydata.util.assertutil import precondition, _assert from allmydata.util.rrefutil import add_version_to_remote_reference from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \ IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, \ @@ -32,8 +32,10 @@ class TooFullError(Exception): pass -class UploadResults(Copyable, RemoteCopy): - implements(IUploadResults) +# HelperUploadResults are what we get from the Helper, and to retain +# backwards compatibility with old Helpers we can't change the format. We +# convert them into a local UploadResults upon receipt. 
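Concretely, the compatibility boundary works like this: HelperUploadResults carries plain serverids (the old wire format), while the new local UploadResults carries IServer objects, so offloaded.py's _finished() above and AssistedUploader._build_verifycap() below translate in each direction. A sketch of those loops as standalone helpers (the function names are mine; the real code inlines the loops):

def sharemap_to_wire(sharemap):
    # local {shnum: set(IServer)} -> wire {shnum: set(serverid)}
    return dict([(shnum, set([s.get_serverid() for s in servers]))
                 for (shnum, servers) in sharemap.items()])

def sharemap_from_wire(storage_broker, wire_sharemap):
    # wire {shnum: set(serverid)} -> local {shnum: set(IServer)}, using
    # stub IServer objects for servers we may never have connected to
    gss = storage_broker.get_stub_server
    return dict([(shnum, set([gss(serverid) for serverid in serverids]))
                 for (shnum, serverids) in wire_sharemap.items()])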
+class HelperUploadResults(Copyable, RemoteCopy): # note: don't change this string, it needs to match the value used on the # helper, and it does *not* need to match the fully-qualified # package/module/class name @@ -55,6 +57,53 @@ self.preexisting_shares = None # count of shares already present self.pushed_shares = None # count of shares we pushed +class UploadResults: + implements(IUploadResults) + + def __init__(self, file_size, + ciphertext_fetched, # how much the helper fetched + preexisting_shares, # count of shares already present + pushed_shares, # count of shares we pushed + sharemap, # {shnum: set(server)} + servermap, # {server: set(shnum)} + timings, # dict of name to number of seconds + uri_extension_data, + uri_extension_hash, + verifycapstr): + self._file_size = file_size + self._ciphertext_fetched = ciphertext_fetched + self._preexisting_shares = preexisting_shares + self._pushed_shares = pushed_shares + self._sharemap = sharemap + self._servermap = servermap + self._timings = timings + self._uri_extension_data = uri_extension_data + self._uri_extension_hash = uri_extension_hash + self._verifycapstr = verifycapstr + + def set_uri(self, uri): + self._uri = uri + + def get_file_size(self): + return self._file_size + def get_uri(self): + return self._uri + def get_ciphertext_fetched(self): + return self._ciphertext_fetched + def get_preexisting_shares(self): + return self._preexisting_shares + def get_pushed_shares(self): + return self._pushed_shares + def get_sharemap(self): + return self._sharemap + def get_servermap(self): + return self._servermap + def get_timings(self): + return self._timings + def get_uri_extension_data(self): + return self._uri_extension_data + def get_verifycapstr(self): + return self._verifycapstr # our current uri_extension is 846 bytes for small files, a few bytes # more for larger ones (since the filesize is encoded in decimal in a @@ -95,6 +144,8 @@ return ("" % (self._server.get_name(), si_b2a(self.storage_index)[:5])) + def get_server(self): + return self._server def get_serverid(self): return self._server.get_serverid() def get_name(self): @@ -573,6 +624,8 @@ CHUNKSIZE = 50*1024 def __init__(self, original, log_parent=None): + precondition(original.default_params_set, + "set_default_encoding_parameters not called on %r before wrapping with EncryptAnUploadable" % (original,)) self.original = IUploadable(original) self._log_number = log_parent self._encryptor = None @@ -847,12 +900,10 @@ self._secret_holder = secret_holder self._log_number = self.log("CHKUploader starting", parent=None) self._encoder = None - self._results = UploadResults() self._storage_index = None self._upload_status = UploadStatus() self._upload_status.set_helper(False) self._upload_status.set_active(True) - self._upload_status.set_results(self._results) # locate_all_shareholders() will create the following attribute: # self._server_trackers = {} # k: shnum, v: instance of ServerTracker @@ -950,7 +1001,7 @@ for st in upload_trackers], already_serverids) self.log(msgtempl % values, level=log.OPERATIONAL) # record already-present shares in self._results - self._results.preexisting_shares = len(already_serverids) + self._count_preexisting_shares = len(already_serverids) self._server_trackers = {} # k: shnum, v: instance of ServerTracker for tracker in upload_trackers: @@ -973,23 +1024,32 @@ encoder.set_shareholders(buckets, servermap) def _encrypted_done(self, verifycap): - """ Returns a Deferred that will fire with the UploadResults instance. 
""" - r = self._results - for shnum in self._encoder.get_shares_placed(): - server_tracker = self._server_trackers[shnum] - serverid = server_tracker.get_serverid() - r.sharemap.add(shnum, serverid) - r.servermap.add(serverid, shnum) - r.pushed_shares = len(self._encoder.get_shares_placed()) + """Returns a Deferred that will fire with the UploadResults instance.""" + e = self._encoder + sharemap = dictutil.DictOfSets() + servermap = dictutil.DictOfSets() + for shnum in e.get_shares_placed(): + server = self._server_trackers[shnum].get_server() + sharemap.add(shnum, server) + servermap.add(server, shnum) now = time.time() - r.file_size = self._encoder.file_size - r.timings["total"] = now - self._started - r.timings["storage_index"] = self._storage_index_elapsed - r.timings["peer_selection"] = self._server_selection_elapsed - r.timings.update(self._encoder.get_times()) - r.uri_extension_data = self._encoder.get_uri_extension_data() - r.verifycapstr = verifycap.to_string() - return r + timings = {} + timings["total"] = now - self._started + timings["storage_index"] = self._storage_index_elapsed + timings["peer_selection"] = self._server_selection_elapsed + timings.update(e.get_times()) + ur = UploadResults(file_size=e.file_size, + ciphertext_fetched=0, + preexisting_shares=self._count_preexisting_shares, + pushed_shares=len(e.get_shares_placed()), + sharemap=sharemap, + servermap=servermap, + timings=timings, + uri_extension_data=e.get_uri_extension_data(), + uri_extension_hash=e.get_uri_extension_hash(), + verifycapstr=verifycap.to_string()) + self._upload_status.set_results(ur) + return ur def get_upload_status(self): return self._upload_status @@ -1014,13 +1074,11 @@ class LiteralUploader: def __init__(self): - self._results = UploadResults() self._status = s = UploadStatus() s.set_storage_index(None) s.set_helper(False) s.set_progress(0, 1.0) s.set_active(False) - s.set_results(self._results) def start(self, uploadable): uploadable = IUploadable(uploadable) @@ -1028,7 +1086,6 @@ def _got_size(size): self._size = size self._status.set_size(size) - self._results.file_size = size return read_this_many_bytes(uploadable, size) d.addCallback(_got_size) d.addCallback(lambda data: uri.LiteralFileURI("".join(data))) @@ -1037,11 +1094,22 @@ return d def _build_results(self, uri): - self._results.uri = uri + ur = UploadResults(file_size=self._size, + ciphertext_fetched=0, + preexisting_shares=0, + pushed_shares=0, + sharemap={}, + servermap={}, + timings={}, + uri_extension_data=None, + uri_extension_hash=None, + verifycapstr=None) + ur.set_uri(uri) self._status.set_status("Finished") self._status.set_progress(1, 1.0) self._status.set_progress(2, 1.0) - return self._results + self._status.set_results(ur) + return ur def close(self): pass @@ -1122,8 +1190,9 @@ class AssistedUploader: - def __init__(self, helper): + def __init__(self, helper, storage_broker): self._helper = helper + self._storage_broker = storage_broker self._log_number = log.msg("AssistedUploader starting") self._storage_index = None self._upload_status = s = UploadStatus() @@ -1179,7 +1248,7 @@ d.addCallback(self._contacted_helper) return d - def _contacted_helper(self, (upload_results, upload_helper)): + def _contacted_helper(self, (helper_upload_results, upload_helper)): now = time.time() elapsed = now - self._time_contacting_helper_start self._elapsed_time_contacting_helper = elapsed @@ -1197,8 +1266,7 @@ return d self.log("helper says file is already uploaded", level=log.OPERATIONAL) self._upload_status.set_progress(1, 1.0) - 
self._upload_status.set_results(upload_results) - return upload_results + return helper_upload_results def _convert_old_upload_results(self, upload_results): # pre-1.3.0 helpers return upload results which contain a mapping @@ -1217,30 +1285,56 @@ if str in [type(v) for v in sharemap.values()]: upload_results.sharemap = None - def _build_verifycap(self, upload_results): + def _build_verifycap(self, helper_upload_results): self.log("upload finished, building readcap", level=log.OPERATIONAL) - self._convert_old_upload_results(upload_results) + self._convert_old_upload_results(helper_upload_results) self._upload_status.set_status("Building Readcap") - r = upload_results - assert r.uri_extension_data["needed_shares"] == self._needed_shares - assert r.uri_extension_data["total_shares"] == self._total_shares - assert r.uri_extension_data["segment_size"] == self._segment_size - assert r.uri_extension_data["size"] == self._size - r.verifycapstr = uri.CHKFileVerifierURI(self._storage_index, - uri_extension_hash=r.uri_extension_hash, - needed_shares=self._needed_shares, - total_shares=self._total_shares, size=self._size - ).to_string() + hur = helper_upload_results + assert hur.uri_extension_data["needed_shares"] == self._needed_shares + assert hur.uri_extension_data["total_shares"] == self._total_shares + assert hur.uri_extension_data["segment_size"] == self._segment_size + assert hur.uri_extension_data["size"] == self._size + + # hur.verifycap doesn't exist if already found + v = uri.CHKFileVerifierURI(self._storage_index, + uri_extension_hash=hur.uri_extension_hash, + needed_shares=self._needed_shares, + total_shares=self._total_shares, + size=self._size) + timings = {} + timings["storage_index"] = self._storage_index_elapsed + timings["contacting_helper"] = self._elapsed_time_contacting_helper + for key,val in hur.timings.items(): + if key == "total": + key = "helper_total" + timings[key] = val now = time.time() - r.file_size = self._size - r.timings["storage_index"] = self._storage_index_elapsed - r.timings["contacting_helper"] = self._elapsed_time_contacting_helper - if "total" in r.timings: - r.timings["helper_total"] = r.timings["total"] - r.timings["total"] = now - self._started + timings["total"] = now - self._started + + gss = self._storage_broker.get_stub_server + sharemap = {} + servermap = {} + for shnum, serverids in hur.sharemap.items(): + sharemap[shnum] = set([gss(serverid) for serverid in serverids]) + # if the file was already in the grid, hur.servermap is an empty dict + for serverid, shnums in hur.servermap.items(): + servermap[gss(serverid)] = set(shnums) + + ur = UploadResults(file_size=self._size, + # not if already found + ciphertext_fetched=hur.ciphertext_fetched, + preexisting_shares=hur.preexisting_shares, + pushed_shares=hur.pushed_shares, + sharemap=sharemap, + servermap=servermap, + timings=timings, + uri_extension_data=hur.uri_extension_data, + uri_extension_hash=hur.uri_extension_hash, + verifycapstr=v.to_string()) + self._upload_status.set_status("Finished") - self._upload_status.set_results(r) - return r + self._upload_status.set_results(ur) + return ur def get_upload_status(self): return self._upload_status @@ -1248,9 +1342,7 @@ class BaseUploadable: # this is overridden by max_segment_size default_max_segment_size = DEFAULT_MAX_SEGMENT_SIZE - default_encoding_param_k = 3 # overridden by encoding_parameters - default_encoding_param_happy = 7 - default_encoding_param_n = 10 + default_params_set = False max_segment_size = None encoding_param_k = None @@ -1276,8 
+1368,10 @@ self.default_encoding_param_n = default_params["n"] if "max_segment_size" in default_params: self.default_max_segment_size = default_params["max_segment_size"] + self.default_params_set = True def get_all_encoding_parameters(self): + _assert(self.default_params_set, "set_default_encoding_parameters not called on %r" % (self,)) if self._all_encoding_parameters: return defer.succeed(self._all_encoding_parameters) @@ -1363,7 +1457,7 @@ def get_size(self): if self._size is not None: return defer.succeed(self._size) - self._filehandle.seek(0,2) + self._filehandle.seek(0, os.SEEK_END) size = self._filehandle.tell() self._size = size self._filehandle.seek(0) @@ -1473,8 +1567,9 @@ else: eu = EncryptAnUploadable(uploadable, self._parentmsgid) d2 = defer.succeed(None) + storage_broker = self.parent.get_storage_broker() if self._helper: - uploader = AssistedUploader(self._helper) + uploader = AssistedUploader(self._helper, storage_broker) d2.addCallback(lambda x: eu.get_storage_index()) d2.addCallback(lambda si: uploader.start(eu, si)) else: @@ -1490,9 +1585,9 @@ # Generate the uri from the verifycap plus the key. d3 = uploadable.get_encryption_key() def put_readcap_into_results(key): - v = uri.from_string(uploadresults.verifycapstr) + v = uri.from_string(uploadresults.get_verifycapstr()) r = uri.CHKFileURI(key, v.uri_extension_hash, v.needed_shares, v.total_shares, v.size) - uploadresults.uri = r.to_string() + uploadresults.set_uri(r.to_string()) return uploadresults d3.addCallback(put_readcap_into_results) return d3 diff -Nru tahoe-lafs-1.9.2/src/allmydata/interfaces.py tahoe-lafs-1.10.0/src/allmydata/interfaces.py --- tahoe-lafs-1.9.2/src/allmydata/interfaces.py 2012-06-16 04:12:38.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/interfaces.py 2013-09-03 15:38:27.000000000 +0000 @@ -26,17 +26,10 @@ Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes Offset = Number ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments -WriteEnablerSecret = Hash # used to protect mutable bucket modifications -LeaseRenewSecret = Hash # used to protect bucket lease renewal requests -LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests - -class RIStubClient(RemoteInterface): - """Each client publishes a service announcement for a dummy object called - the StubClient. This object doesn't actually offer any services, but the - announcement helps the Introducer keep track of which clients are - subscribed (so the grid admin can keep track of things like the size of - the grid and the client versions in use. This is the (empty) - RemoteInterface for the StubClient.""" +WriteEnablerSecret = Hash # used to protect mutable share modifications +LeaseRenewSecret = Hash # used to protect lease renewal requests +LeaseCancelSecret = Hash # was used to protect lease cancellation requests + class RIBucketWriter(RemoteInterface): """ Objects of this kind live on the server side. """ @@ -56,6 +49,7 @@ """ return None + class RIBucketReader(RemoteInterface): def read(offset=Offset, length=ReadSize): return ShareData @@ -66,12 +60,13 @@ failures. I will record their concern so that my operator can manually inspect the shares in question. I return None. 
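The interfaces.py hunks below repeatedly apply one convention change: foolscap constraint objects, which previously appeared as default argument values in the zope.interface method signatures, move into `@param name=Constraint` lines in the docstrings. A minimal sketch of the new style (the interface name here is illustrative):

from zope.interface import Interface

class IExampleBucketWriter(Interface):
    def put_block(segmentnum, data):
        """
        @param segmentnum=int
        @param data=ShareData: most blocks are 'blocksize' bytes; the last
            may be shorter
        @return: a Deferred that fires (with None) when the operation
            completes
        """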
- This is a wrapper around RIStorageServer.advise_corrupt_share(), - which is tied to a specific share, and therefore does not need the + This is a wrapper around RIStorageServer.advise_corrupt_share() + that is tied to a specific share, and therefore does not need the extra share-identifying arguments. Please see that method for full documentation. """ + TestVector = ListOf(TupleOf(Offset, ReadSize, str, str)) # elements are (offset, length, operator, specimen) # operator is one of "lt, le, eq, ne, ge, gt" @@ -88,6 +83,7 @@ ReadData = ListOf(ShareData) # returns data[offset:offset+length] for each element of TestVector + class RIStorageServer(RemoteInterface): __remote_name__ = "RIStorageServer.tahoe.allmydata.com" @@ -112,7 +108,9 @@ This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. - @param cancel_secret: Like renew_secret, but protects bucket decref. + @param cancel_secret: This no longer allows lease cancellation, but + must still be a unique value identifying the + lease. XXX stop relying on it to be unique. @param canary: If the canary is lost before close(), the bucket is deleted. @return: tuple of (alreadygot, allocated), where alreadygot is what we @@ -143,8 +141,8 @@ For mutable shares, if the given renew_secret does not match an existing lease, IndexError will be raised with a note listing the server-nodeids on the existing leases, so leases on migrated shares - can be renewed or cancelled. For immutable shares, IndexError - (without the note) will be raised. + can be renewed. For immutable shares, IndexError (without the note) + will be raised. """ return Any() @@ -193,7 +191,9 @@ This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. - @param cancel_secret: Like renew_secret, but protects bucket decref. + @param cancel_secret: This no longer allows lease cancellation, but + must still be a unique value identifying the + lease. XXX stop relying on it to be unique. The 'secrets' argument is a tuple of (write_enabler, renew_secret, cancel_secret). The first is required to perform any write. The @@ -284,43 +284,44 @@ (binary) storage index string, and 'shnum' is the integer share number. 'reason' is a human-readable explanation of the problem, probably including some expected hash values and the computed ones - which did not match. Corruption advisories for mutable shares should + that did not match. Corruption advisories for mutable shares should include a hash of the public key (the same value that appears in the mutable-file verify-cap), since the current share format does not store that on disk. """ + class IStorageBucketWriter(Interface): """ Objects of this kind live on the client side. """ - def put_block(segmentnum=int, data=ShareData): - """@param data: For most segments, this data will be 'blocksize' - bytes in length. The last segment might be shorter. - @return: a Deferred that fires (with None) when the operation completes - """ - - def put_plaintext_hashes(hashes=ListOf(Hash)): + def put_block(segmentnum, data): """ + @param segmentnum=int + @param data=ShareData: For most segments, this data will be 'blocksize' + bytes in length. The last segment might be shorter. 
@return: a Deferred that fires (with None) when the operation completes """ - def put_crypttext_hashes(hashes=ListOf(Hash)): + def put_crypttext_hashes(hashes): """ + @param hashes=ListOf(Hash) @return: a Deferred that fires (with None) when the operation completes """ - def put_block_hashes(blockhashes=ListOf(Hash)): + def put_block_hashes(blockhashes): """ + @param blockhashes=ListOf(Hash) @return: a Deferred that fires (with None) when the operation completes """ - def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))): + def put_share_hashes(sharehashes): """ + @param sharehashes=ListOf(TupleOf(int, Hash)) @return: a Deferred that fires (with None) when the operation completes """ - def put_uri_extension(data=URIExtensionData): + def put_uri_extension(data): """This block of data contains integrity-checking information (hashes of plaintext, crypttext, and shares), as well as encoding parameters that are necessary to recover the data. This is a serialized dict @@ -333,6 +334,7 @@ assert re.match(r'^[a-zA-Z_\-]+$', k) write(k + ':' + netstring(dict[k])) + @param data=URIExtensionData @return: a Deferred that fires (with None) when the operation completes """ @@ -345,12 +347,15 @@ @return: a Deferred that fires (with None) when the operation completes """ -class IStorageBucketReader(Interface): - def get_block_data(blocknum=int, blocksize=int, size=int): +class IStorageBucketReader(Interface): + def get_block_data(blocknum, blocksize, size): """Most blocks will be the same size. The last block might be shorter than the others. + @param blocknum=int + @param blocksize=int + @param size=int @return: ShareData """ @@ -359,12 +364,13 @@ @return: ListOf(Hash) """ - def get_block_hashes(at_least_these=SetOf(int)): + def get_block_hashes(at_least_these=()): """ + @param at_least_these=SetOf(int) @return: ListOf(Hash) """ - def get_share_hashes(at_least_these=SetOf(int)): + def get_share_hashes(): """ @return: ListOf(TupleOf(int, Hash)) """ @@ -374,6 +380,7 @@ @return: URIExtensionData """ + class IStorageBroker(Interface): def get_servers_for_psi(peer_selection_index): """ @@ -421,7 +428,7 @@ remote_host: the IAddress, if connected, otherwise None This method is intended for monitoring interfaces, such as a web page - which describes connecting and connected peers. + that describes connecting and connected peers. """ def get_all_peerids(): @@ -439,12 +446,23 @@ repeatable way, to distribute load over many peers. """ -class IServer(Interface): + +class IDisplayableServer(Interface): + def get_nickname(): + pass + + def get_name(): + pass + + def get_longname(): + pass + + +class IServer(IDisplayableServer): """I live in the client, and represent a single server.""" def start_connecting(tub, trigger_cb): pass - def get_nickname(): - pass + def get_rref(): """Once a server is connected, I return a RemoteReference. Before a server is connected for the first time, I return None. @@ -458,7 +476,7 @@ """ The interface for a writer around a mutable slot on a remote server. """ - def set_checkstring(checkstring, *args): + def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None): """ Set the checkstring that I will pass to the remote server when writing. @@ -487,13 +505,15 @@ Add the encrypted private key to the share. """ - def put_blockhashes(blockhashes=list): + def put_blockhashes(blockhashes): """ + @param blockhashes=list Add the block hash tree to the share. 
""" - def put_sharehashes(sharehashes=dict): + def put_sharehashes(sharehashes): """ + @param sharehashes=dict Add the share hash chain to the share. """ @@ -537,7 +557,7 @@ # TODO: rename to get_read_cap() def get_readonly(): - """Return another IURI instance, which represents a read-only form of + """Return another IURI instance that represents a read-only form of this one. If is_readonly() is True, this returns self.""" def get_verify_cap(): @@ -552,6 +572,7 @@ """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" + class IVerifierURI(Interface, IURI): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate @@ -563,14 +584,17 @@ """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" + class IDirnodeURI(Interface): - """I am a URI which represents a dirnode.""" + """I am a URI that represents a dirnode.""" + class IFileURI(Interface): - """I am a URI which represents a filenode.""" + """I am a URI that represents a filenode.""" def get_size(): """Return the length (in bytes) of the file that I represent.""" + class IImmutableFileURI(IFileURI): pass @@ -583,6 +607,7 @@ class IReadonlyDirectoryURI(Interface): pass + class CapConstraintError(Exception): """A constraint on a cap was violated.""" @@ -714,8 +739,7 @@ writer-visible data using this writekey. """ - # TODO: Can this be overwrite instead of replace? - def replace(new_contents): + def overwrite(new_contents): """Replace the contents of the mutable file, provided that no other node has published (or is attempting to publish, concurrently) a newer version of the file than this one. @@ -872,8 +896,9 @@ data this node represents. """ + class IFileNode(IFilesystemNode): - """I am a node which represents a file: a sequence of bytes. I am not a + """I am a node that represents a file: a sequence of bytes. I am not a container, like IDirectoryNode.""" def get_best_readable_version(): """Return a Deferred that fires with an IReadable for the 'best' @@ -922,7 +947,7 @@ multiple versions of a file present in the grid, some of which might be unrecoverable (i.e. have fewer than 'k' shares). These versions are loosely ordered: each has a sequence number and a hash, and any version - with seqnum=N was uploaded by a node which has seen at least one version + with seqnum=N was uploaded by a node that has seen at least one version with seqnum=N-1. The 'servermap' (an instance of IMutableFileServerMap) is used to @@ -1031,7 +1056,7 @@ as a guide to where the shares are located. I return a Deferred that fires with the requested contents, or - errbacks with UnrecoverableFileError. Note that a servermap which was + errbacks with UnrecoverableFileError. 
Note that a servermap that was updated with MODE_ANYTHING or MODE_READ may not know about shares for all versions (those modes stop querying servers as soon as they can fulfil their goals), so you may want to use MODE_CHECK (which checks @@ -1077,6 +1102,7 @@ def get_version(): """Returns the mutable file protocol version.""" + class NotEnoughSharesError(Exception): """Download was unable to get enough shares""" @@ -1090,7 +1116,7 @@ """Upload was unable to satisfy 'servers_of_happiness'""" class UnableToFetchCriticalDownloadDataError(Exception): - """I was unable to fetch some piece of critical data which is supposed to + """I was unable to fetch some piece of critical data that is supposed to be identically present in all shares.""" class NoServersError(Exception): @@ -1102,7 +1128,7 @@ exists, and overwrite= was set to False.""" class NoSuchChildError(Exception): - """A directory node was asked to fetch a child which does not exist.""" + """A directory node was asked to fetch a child that does not exist.""" def __str__(self): # avoid UnicodeEncodeErrors when converting to str return self.__repr__() @@ -1110,6 +1136,7 @@ class ChildOfWrongTypeError(Exception): """An operation was attempted on a child of the wrong type (file or directory).""" + class IDirectoryNode(IFilesystemNode): """I represent a filesystem node that is a container, with a name-to-child mapping, holding the tahoe equivalent of a directory. All @@ -1343,6 +1370,7 @@ takes several minutes of 100% CPU for ~1700 directories). """ + class ICodecEncoder(Interface): def set_params(data_size, required_shares, max_shares): """Set up the parameters of this encoder. @@ -1423,7 +1451,7 @@ if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of - producing shares for the peers which are no longer available. + producing shares for the peers that are no longer available. """ @@ -1498,7 +1526,7 @@ if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of - producing shares for the peers which are no longer available. + producing shares for the peers that are no longer available. For each call, encode() will return a Deferred that fires with two lists, one containing shares and the other containing the shareids. @@ -1555,7 +1583,7 @@ required to be of the same length. The i'th element of their_shareids is required to be the shareid of the i'th buffer in some_shares. - This returns a Deferred which fires with a sequence of buffers. This + This returns a Deferred that fires with a sequence of buffers. This sequence will contain all of the segments of the original data, in order. The sum of the lengths of all of the buffers will be the 'data_size' value passed into the original ICodecEncode.set_params() @@ -1575,6 +1603,7 @@ call. """ + class IEncoder(Interface): """I take an object that provides IEncryptedUploadable, which provides encrypted data, and a list of shareholders. I then encode, hash, and @@ -1593,21 +1622,6 @@ """Specify the number of bytes that will be encoded. This must be peformed before get_serialized_params() can be called. """ - def set_params(params): - """Override the default encoding parameters. 
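
``ICodecEncoder`` wraps the zfec erasure-coding library, with 3-of-10 as the default parameters. A hedged sketch of the underlying k-of-N round trip, assuming zfec's documented ``Encoder``/``Decoder`` API:

    import zfec

    k, n = 3, 10                          # the default 3-of-10 encoding
    data = "x" * 3000                     # length divisible by k
    blocksize = len(data) // k
    primary = [data[i*blocksize:(i+1)*blocksize] for i in range(k)]

    shares = zfec.Encoder(k, n).encode(primary)   # n blocks; any k suffice
    recovered = zfec.Decoder(k, n).decode(shares[4:7], [4, 5, 6])
    assert "".join(recovered) == data
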
'params' is a tuple of - (k,d,n), where 'k' is the number of required shares, 'd' is the - servers_of_happiness, and 'n' is the total number of shares that will - be created. - - Encoding parameters can be set in three ways. 1: The Encoder class - provides defaults (3/7/10). 2: the Encoder can be constructed with - an 'options' dictionary, in which the - needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3: - set_params((k,d,n)) can be called. - - If you intend to use set_params(), you must call it before - get_share_size or get_param are called. - """ def set_encrypted_uploadable(u): """Provide a source of encrypted upload data. 'u' must implement @@ -1683,6 +1697,7 @@ sufficient to construct the read cap. """ + class IDecoder(Interface): """I take a list of shareholders and some setup information, then download, validate, decode, and decrypt data from them, writing the @@ -1717,6 +1732,7 @@ complete. """ + class IDownloadTarget(Interface): # Note that if the IDownloadTarget is also an IConsumer, the downloader # will register itself as a producer. This allows the target to invoke @@ -1730,22 +1746,27 @@ def write(data): """Output some data to the target.""" + def close(): """Inform the target that there is no more data to be written.""" + def fail(why): """fail() is called to indicate that the download has failed. 'why' is a Failure object indicating what went wrong. No further methods will be invoked on the IDownloadTarget after fail().""" + def register_canceller(cb): """The CiphertextDownloader uses this to register a no-argument function that the target can call to cancel the download. Once this canceller is invoked, no further calls to write() or close() will be made.""" + def finish(): """When the CiphertextDownloader is done, this finish() function will be called. Whatever it returns will be returned to the invoker of Downloader.download. """ + class IDownloader(Interface): def download(uri, target): """Perform a CHK download, sending the data to the given target. @@ -1754,6 +1775,7 @@ Returns a Deferred that fires (with the results of target.finish) when the download is finished, or errbacks if something went wrong.""" + class IEncryptedUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status @@ -1792,37 +1814,10 @@ resuming an interrupted upload (where we need to compute the plaintext hashes, but don't need the redundant encrypted data).""" - def get_plaintext_hashtree_leaves(first, last, num_segments): - """OBSOLETE; Get the leaf nodes of a merkle hash tree over the - plaintext segments, i.e. get the tagged hashes of the given segments. - The segment size is expected to be generated by the - IEncryptedUploadable before any plaintext is read or ciphertext - produced, so that the segment hashes can be generated with only a - single pass. - - This returns a Deferred which fires with a sequence of hashes, using: - - tuple(segment_hashes[first:last]) - - 'num_segments' is used to assert that the number of segments that the - IEncryptedUploadable handled matches the number of segments that the - encoder was expecting. - - This method must not be called until the final byte has been read - from read_encrypted(). Once this method is called, read_encrypted() - can never be called again. - """ - - def get_plaintext_hash(): - """OBSOLETE; Get the hash of the whole plaintext. 
- - This returns a Deferred which fires with a tagged SHA-256 hash of the - whole plaintext, obtained from hashutil.plaintext_hash(data). - """ - def close(): """Just like IUploadable.close().""" + class IUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status @@ -1876,13 +1871,13 @@ be used to encrypt the data. The key will also be hashed to derive the StorageIndex. - Uploadables which want to achieve convergence should hash their file + Uploadables that want to achieve convergence should hash their file contents and the serialized_encoding_parameters to form the key (which of course requires a full pass over the data). Uploadables can use the upload.ConvergentUploadMixin class to achieve this automatically. - Uploadables which do not care about convergence (or do not wish to + Uploadables that do not care about convergence (or do not wish to make multiple passes over the data) can simply return a strongly-random 16 byte string. @@ -1892,7 +1887,7 @@ def read(length): """Return a Deferred that fires with a list of strings (perhaps with - only a single element) which, when concatenated together, contain the + only a single element) that, when concatenated together, contain the next 'length' bytes of data. If EOF is near, this may provide fewer than 'length' bytes. The total number of bytes provided by read() before it signals EOF must equal the size provided by get_size(). @@ -1939,7 +1934,7 @@ def read(length): """ - Returns a list of strings which, when concatenated, are the next + Returns a list of strings that, when concatenated, are the next length bytes of the file, or fewer if there are fewer bytes between the current location and the end of the file. """ @@ -1950,42 +1945,68 @@ the uploadable may be closed. """ + class IUploadResults(Interface): - """I am returned by upload() methods. I contain a number of public - attributes which can be read to determine the results of the upload. Some - of these are functional, some are timing information. All of these may be - None. + """I am returned by immutable upload() methods and contain the results of + the upload. - .file_size : the size of the file, in bytes - .uri : the CHK read-cap for the file - .ciphertext_fetched : how many bytes were fetched by the helper - .sharemap: dict mapping share identifier to set of serverids - (binary strings). This indicates which servers were given - which shares. For immutable files, the shareid is an - integer (the share number, from 0 to N-1). For mutable - files, it is a string of the form 'seq%d-%s-sh%d', - containing the sequence number, the roothash, and the - share number. 
-    .servermap : dict mapping server peerid to a set of share numbers
-    .timings : dict of timing information, mapping name to seconds (float)
-      total : total upload time, start to finish
-      storage_index : time to compute the storage index
-      peer_selection : time to decide which peers will be used
-      contacting_helper : initial helper query to upload/no-upload decision
-      existence_check : helper pre-upload existence check
-      helper_total : initial helper query to helper finished pushing
-      cumulative_fetch : helper waiting for ciphertext requests
-      total_fetch : helper start to last ciphertext response
-      cumulative_encoding : just time spent in zfec
-      cumulative_sending : just time spent waiting for storage servers
-      hashes_and_close : last segment push to shareholder close
-      total_encode_and_push : first encode to shareholder close
+    Note that some of my methods return empty values (0 or an empty dict)
+    when called for non-distributed LIT files."""
+
+    def get_file_size():
+        """Return the file size, in bytes."""
+
+    def get_uri():
+        """Return the (string) URI of the object uploaded, a CHK readcap."""
+
+    def get_ciphertext_fetched():
+        """Return the number of bytes fetched by the helper for this upload,
+        or 0 if the helper did not need to fetch any bytes (or if there was
+        no helper)."""
+
+    def get_preexisting_shares():
+        """Return the number of shares that were already present in the grid."""
+
+    def get_pushed_shares():
+        """Return the number of shares that were uploaded."""
+
+    def get_sharemap():
+        """Return a dict mapping share identifier to set of IServer
+        instances. This indicates which servers were given which shares. For
+        immutable files, the shareid is an integer (the share number, from 0
+        to N-1). For mutable files, it is a string of the form
+        'seq%d-%s-sh%d', containing the sequence number, the roothash, and
+        the share number."""
+
+    def get_servermap():
+        """Return dict mapping IServer instance to a set of share numbers."""
+
+    def get_timings():
+        """Return dict of timing information, mapping name to seconds. All
+        times are floats:
+          total : total upload time, start to finish
+          storage_index : time to compute the storage index
+          peer_selection : time to decide which peers will be used
+          contacting_helper : initial helper query to upload/no-upload decision
+          helper_total : initial helper query to helper finished pushing
+          cumulative_fetch : helper waiting for ciphertext requests
+          total_fetch : helper start to last ciphertext response
+          cumulative_encoding : just time spent in zfec
+          cumulative_sending : just time spent waiting for storage servers
+          hashes_and_close : last segment push to shareholder close
+          total_encode_and_push : first encode to shareholder close
+        """
+
+    def get_uri_extension_data():
+        """Return the dict of UEB data created for this file."""
+
+    def get_verifycapstr():
+        """Return the (string) verify-cap URI for the uploaded object."""
-    """

 class IDownloadResults(Interface):
     """I am created internally by download() methods.
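
A short consumer sketch for the accessor-style results (``describe_upload`` is hypothetical; every method it calls is specified by ``IUploadResults`` above, and ``get_name()`` comes from ``IDisplayableServer``):

    def describe_upload(results):
        print "uploaded %d bytes" % results.get_file_size()
        print "readcap: %s" % results.get_uri()
        for shareid, servers in sorted(results.get_sharemap().items()):
            print "  share %s on %s" % (shareid,
                                        ", ".join(s.get_name() for s in servers))
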
I contain a number of - public attributes which contain details about the download process.:: + public attributes that contain details about the download process.:: .file_size : the size of the file, in bytes .servers_used : set of server peerids that were used during download @@ -2005,17 +2026,15 @@ cumulative_decrypt : just time spent in decryption total : total download time, start to finish fetch_per_server : dict of server to list of per-segment fetch times - """ + class IUploader(Interface): def upload(uploadable): """Upload the file. 'uploadable' must impement IUploadable. This - returns a Deferred which fires with an IUploadResults instance, from + returns a Deferred that fires with an IUploadResults instance, from which the URI of the file can be obtained as results.uri .""" - def upload_ssk(write_capability, new_version, uploadable): - """TODO: how should this work?""" class ICheckable(Interface): def check(monitor, verify=False, add_lease=False): @@ -2061,7 +2080,7 @@ kind of lease that is obtained (which account number to claim, etc). TODO: any problems seen during checking will be reported to the - health-manager.furl, a centralized object which is responsible for + health-manager.furl, a centralized object that is responsible for figuring out why files are unhealthy so corrective action can be taken. """ @@ -2076,9 +2095,10 @@ will be put in the check-and-repair results. The Deferred will not fire until the repair is complete. - This returns a Deferred which fires with an instance of + This returns a Deferred that fires with an instance of ICheckAndRepairResults.""" + class IDeepCheckable(Interface): def start_deep_check(verify=False, add_lease=False): """Check upon the health of me and everything I can reach. @@ -2112,14 +2132,17 @@ failure. """ + class ICheckResults(Interface): """I contain the detailed results of a check/verify operation. """ def get_storage_index(): """Return a string with the (binary) storage index.""" + def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" + def get_uri(): """Return the (string) URI of the object that was checked.""" @@ -2138,75 +2161,69 @@ improved by moving shares to new servers. Non-distributed LIT files always return False.""" + # the following methods all return None for non-distributed LIT files - def get_data(): - """Return a dictionary that describes the state of the file/dir. LIT - files always return an empty dictionary. Normal files and directories - return a dictionary with the following keys (note that these use - binary strings rather than base32-encoded ones) (also note that for - mutable files, these counts are for the 'best' version): - - count-shares-good: the number of distinct good shares that were found - count-shares-needed: 'k', the number of shares required for recovery - count-shares-expected: 'N', the number of total shares generated - count-good-share-hosts: the number of distinct storage servers with - good shares. If this number is less than - count-shares-good, then some shares are - doubled up, increasing the correlation of - failures. This indicates that one or more - shares should be moved to an otherwise unused - server, if one is available. - count-corrupt-shares: the number of shares with integrity failures - list-corrupt-shares: a list of 'share locators', one for each share - that was found to be corrupt. Each share - locator is a list of (serverid, storage_index, - sharenum). 
- count-incompatible-shares: the number of shares which are of a share - format unknown to this checker - list-incompatible-shares: a list of 'share locators', one for each - share that was found to be of an unknown - format. Each share locator is a list of - (serverid, storage_index, sharenum). - servers-responding: list of (binary) storage server identifiers, - one for each server which responded to the share - query (even if they said they didn't have - shares, and even if they said they did have - shares but then didn't send them when asked, or - dropped the connection, or returned a Failure, - and even if they said they did have shares and - sent incorrect ones when asked) - sharemap: dict mapping share identifier to list of serverids - (binary strings). This indicates which servers are holding - which shares. For immutable files, the shareid is an - integer (the share number, from 0 to N-1). For mutable - files, it is a string of the form 'seq%d-%s-sh%d', - containing the sequence number, the roothash, and the - share number. - - The following keys are most relevant for mutable files, but immutable - files will provide sensible values too:: - - count-wrong-shares: the number of shares for versions other than the - 'best' one (which is defined as being the - recoverable version with the highest sequence - number, then the highest roothash). These are - either leftover shares from an older version - (perhaps on a server that was offline when an - update occurred), shares from an unrecoverable - newer version, or shares from an alternate - current version that results from an - uncoordinated write collision. For a healthy - file, this will equal 0. - - count-recoverable-versions: the number of recoverable versions of - the file. For a healthy file, this will - equal 1. - - count-unrecoverable-versions: the number of unrecoverable versions - of the file. For a healthy file, this - will be 0. + def get_encoding_needed(): + """Return 'k', the number of shares required for recovery""" - """ + def get_encoding_expected(): + """Return 'N', the number of total shares generated""" + + def get_share_counter_good(): + """Return the number of distinct good shares that were found. For + mutable files, this counts shares for the 'best' version.""" + + def get_share_counter_wrong(): + """For mutable files, return the number of shares for versions other + than the 'best' one (which is defined as being the recoverable + version with the highest sequence number, then the highest roothash). + These are either leftover shares from an older version (perhaps on a + server that was offline when an update occurred), shares from an + unrecoverable newer version, or shares from an alternate current + version that results from an uncoordinated write collision. For a + healthy file, this will equal 0. For immutable files, this will + always equal 0.""" + + def get_corrupt_shares(): + """Return a list of 'share locators', one for each share that was + found to be corrupt (integrity failure). Each share locator is a list + of (IServer, storage_index, sharenum).""" + + def get_incompatible_shares(): + """Return a list of 'share locators', one for each share that was + found to be of an unknown format. 
Each share locator is a list of + (IServer, storage_index, sharenum).""" + + def get_servers_responding(): + """Return a list of IServer objects, one for each server that + responded to the share query (even if they said they didn't have + shares, and even if they said they did have shares but then didn't + send them when asked, or dropped the connection, or returned a + Failure, and even if they said they did have shares and sent + incorrect ones when asked)""" + + def get_host_counter_good_shares(): + """Return the number of distinct storage servers with good shares. If + this number is less than get_share_counters()[good], then some shares + are doubled up, increasing the correlation of failures. This + indicates that one or more shares should be moved to an otherwise + unused server, if one is available. + """ + + def get_version_counter_recoverable(): + """Return the number of recoverable versions of the file. For a + healthy file, this will equal 1.""" + + def get_version_counter_unrecoverable(): + """Return the number of unrecoverable versions of the file. For a + healthy file, this will be 0.""" + + def get_sharemap(): + """Return a dict mapping share identifier to list of IServer objects. + This indicates which servers are holding which shares. For immutable + files, the shareid is an integer (the share number, from 0 to N-1). + For mutable files, it is a string of the form 'seq%d-%s-sh%d', + containing the sequence number, the roothash, and the share number.""" def get_summary(): """Return a string with a brief (one-line) summary of the results.""" @@ -2214,6 +2231,7 @@ def get_report(): """Return a list of strings with more detailed results.""" + class ICheckAndRepairResults(Interface): """I contain the detailed results of a check/verify/repair operation. @@ -2223,20 +2241,25 @@ def get_storage_index(): """Return a string with the (binary) storage index.""" + def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" + def get_repair_attempted(): """Return a boolean, True if a repair was attempted. We might not attempt to repair the file because it was healthy, or healthy enough (i.e. some shares were missing but not enough to exceed some threshold), or because we don't know how to repair this object.""" + def get_repair_successful(): """Return a boolean, True if repair was attempted and the file/dir was fully healthy afterwards. False if no repair was attempted or if a repair attempt failed.""" + def get_pre_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir before any repair was attempted.""" + def get_post_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir after any repair was attempted. If no repair was attempted, @@ -2252,6 +2275,7 @@ def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" + def get_counters(): """Return a dictionary with the following keys:: @@ -2266,10 +2290,9 @@ """ def get_corrupt_shares(): - """Return a set of (serverid, storage_index, sharenum) for all shares - that were found to be corrupt. Both serverid and storage_index are - binary. - """ + """Return a set of (IServer, storage_index, sharenum) for all shares + that were found to be corrupt. 
storage_index is binary.""" + def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to be slash-joined) to an ICheckResults instance, one for each object @@ -2284,6 +2307,7 @@ """Return a dictionary with the same keys as IDirectoryNode.deep_stats().""" + class IDeepCheckAndRepairResults(Interface): """I contain the results of a deep-check-and-repair operation. @@ -2293,6 +2317,7 @@ def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" + def get_counters(): """Return a dictionary with the following keys:: @@ -2334,15 +2359,15 @@ IDirectoryNode.deep_stats().""" def get_corrupt_shares(): - """Return a set of (serverid, storage_index, sharenum) for all shares - that were found to be corrupt before any repair was attempted. Both - serverid and storage_index are binary. + """Return a set of (IServer, storage_index, sharenum) for all shares + that were found to be corrupt before any repair was attempted. + storage_index is binary. """ def get_remaining_corrupt_shares(): - """Return a set of (serverid, storage_index, sharenum) for all shares - that were found to be corrupt after any repair was completed. Both - serverid and storage_index are binary. These are shares that need - manual inspection and probably deletion. + """Return a set of (IServer, storage_index, sharenum) for all shares + that were found to be corrupt after any repair was completed. + storage_index is binary. These are shares that need manual inspection + and probably deletion. """ def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to @@ -2376,9 +2401,10 @@ return d """ + class IRepairResults(Interface): """I contain the results of a repair operation.""" - def get_successful(self): + def get_successful(): """Returns a boolean: True if the repair made the file healthy, False if not. Repair failure generally indicates a file that has been damaged beyond repair.""" @@ -2437,6 +2463,7 @@ DirectoryNode. """ + class INodeMaker(Interface): """The NodeMaker is used to create IFilesystemNode instances. It can accept a filecap/dircap string and return the node right away. It can @@ -2450,13 +2477,14 @@ Tahoe process will typically have a single NodeMaker, but unit tests may create simplified/mocked forms for testing purposes. """ - def create_from_cap(writecap, readcap=None, **kwargs): + + def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u""): """I create an IFilesystemNode from the given writecap/readcap. I can only provide nodes for existing file/directory objects: use my other methods to create new objects. I return synchronously.""" def create_mutable_file(contents=None, keysize=None): - """I create a new mutable file, and return a Deferred which will fire + """I create a new mutable file, and return a Deferred that will fire with the IMutableFileNode instance when it is ready. If contents= is provided (a bytestring), it will be used as the initial contents of the new file, otherwise the file will contain zero bytes. keysize= is @@ -2464,50 +2492,60 @@ usual.""" def create_new_mutable_directory(initial_children={}): - """I create a new mutable directory, and return a Deferred which will + """I create a new mutable directory, and return a Deferred that will fire with the IDirectoryNode instance when it is ready. 
If initial_children= is provided (a dict mapping unicode child name to (childnode, metadata_dict) tuples), the directory will be populated with those children, otherwise it will be empty.""" + class IClientStatus(Interface): def list_all_uploads(): - """Return a list of uploader objects, one for each upload which + """Return a list of uploader objects, one for each upload that currently has an object available (tracked with weakrefs). This is intended for debugging purposes.""" + def list_active_uploads(): """Return a list of active IUploadStatus objects.""" + def list_recent_uploads(): """Return a list of IUploadStatus objects for the most recently started uploads.""" def list_all_downloads(): - """Return a list of downloader objects, one for each download which + """Return a list of downloader objects, one for each download that currently has an object available (tracked with weakrefs). This is intended for debugging purposes.""" + def list_active_downloads(): """Return a list of active IDownloadStatus objects.""" + def list_recent_downloads(): """Return a list of IDownloadStatus objects for the most recently started downloads.""" + class IUploadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" + def get_storage_index(): """Return a string with the (binary) storage index in use on this upload. Returns None if the storage index has not yet been calculated.""" + def get_size(): """Return an integer with the number of bytes that will eventually be uploaded for this file. Returns None if the size is not yet known. """ def using_helper(): """Return True if this upload is using a Helper, False if not.""" + def get_status(): """Return a string describing the current state of the upload process.""" + def get_progress(): """Returns a tuple of floats, (chk, ciphertext, encode_and_push), each from 0.0 to 1.0 . 'chk' describes how much progress has been @@ -2519,60 +2557,75 @@ process has finished: for helper uploads this is dependent upon the helper providing progress reports. It might be reasonable to add all three numbers and report the sum to the user.""" + def get_active(): """Return True if the upload is currently active, False if not.""" + def get_results(): """Return an instance of UploadResults (which contains timing and sharemap information). Might return None if the upload is not yet finished.""" + def get_counter(): """Each upload status gets a unique number: this method returns that number. This provides a handle to this particular upload, so a web page can generate a suitable hyperlink.""" + class IDownloadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" + def get_storage_index(): """Return a string with the (binary) storage index in use on this download. This may be None if there is no storage index (i.e. LIT files).""" + def get_size(): """Return an integer with the number of bytes that will eventually be retrieved for this file. Returns None if the size is not yet known. """ + def using_helper(): """Return True if this download is using a Helper, False if not.""" + def get_status(): """Return a string describing the current state of the download process.""" + def get_progress(): """Returns a float (from 0.0 to 1.0) describing the amount of the download that has completed. 
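
Since ``IUploadStatus.get_progress()`` returns three floats and its docstring suggests they can reasonably be combined for display, a UI might reduce them like this (averaging is one plausible choice, keeping the figure in 0..1; the helper name is invented):

    def upload_progress_fraction(upload_status):
        chk, ciphertext, encode_and_push = upload_status.get_progress()
        return (chk + ciphertext + encode_and_push) / 3.0
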
This value will remain at 0.0 until the first byte of plaintext is pushed to the download target.""" + def get_active(): """Return True if the download is currently active, False if not.""" + def get_counter(): """Each download status gets a unique number: this method returns that number. This provides a handle to this particular download, so a web page can generate a suitable hyperlink.""" + class IServermapUpdaterStatus(Interface): pass + class IPublishStatus(Interface): pass + class IRetrieveStatus(Interface): pass + class NotCapableError(Exception): """You have tried to write to a read-only node.""" class BadWriteEnablerError(Exception): pass -class RIControlClient(RemoteInterface): +class RIControlClient(RemoteInterface): def wait_for_client_connections(num_clients=int): """Do not return until we have connections to at least NUM_CLIENTS storage servers. @@ -2627,8 +2680,10 @@ return DictOf(str, float) + UploadResults = Any() #DictOf(str, str) + class RIEncryptedUploadable(RemoteInterface): __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com" @@ -2701,6 +2756,7 @@ """ return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None))) + class RIStatsGatherer(RemoteInterface): __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com" """ @@ -2709,7 +2765,7 @@ def provide(provider=RIStatsProvider, nickname=str): """ - @param provider: a stats collector instance which should be polled + @param provider: a stats collector instance that should be polled periodically by the gatherer to collect stats. @param nickname: a name useful to identify the provided client """ @@ -2740,16 +2796,19 @@ class FileTooLargeError(Exception): pass + class IValidatedThingProxy(Interface): def start(): - """ Acquire a thing and validate it. Return a deferred which is + """ Acquire a thing and validate it. 
Return a deferred that is eventually fired with self if the thing is valid or errbacked if it can't be acquired or validated.""" + class InsufficientVersionError(Exception): def __init__(self, needed, got): self.needed = needed self.got = got + def __repr__(self): return "InsufficientVersionError(need '%s', got %s)" % (self.needed, self.got) diff -Nru tahoe-lafs-1.9.2/src/allmydata/introducer/client.py tahoe-lafs-1.10.0/src/allmydata/introducer/client.py --- tahoe-lafs-1.9.2/src/allmydata/introducer/client.py 2012-05-14 02:07:22.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/introducer/client.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,29 +1,78 @@ -from base64 import b32decode +import time from zope.interface import implements from twisted.application import service -from foolscap.api import Referenceable, SturdyRef, eventually +from foolscap.api import Referenceable, eventually, RemoteInterface from allmydata.interfaces import InsufficientVersionError -from allmydata.introducer.interfaces import RIIntroducerSubscriberClient, \ - IIntroducerClient -from allmydata.util import log, idlib -from allmydata.util.rrefutil import add_version_to_remote_reference, trap_deadref +from allmydata.introducer.interfaces import IIntroducerClient, \ + RIIntroducerSubscriberClient_v1, RIIntroducerSubscriberClient_v2 +from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap,\ + convert_announcement_v1_to_v2, convert_announcement_v2_to_v1, \ + make_index, get_tubid_string_from_ann, get_tubid_string +from allmydata.util import log +from allmydata.util.rrefutil import add_version_to_remote_reference +from allmydata.util.keyutil import BadSignatureError + +class WrapV2ClientInV1Interface(Referenceable): # for_v1 + """I wrap a v2 IntroducerClient to make it look like a v1 client, so it + can be attached to an old server.""" + implements(RIIntroducerSubscriberClient_v1) + def __init__(self, original): + self.original = original + + def remote_announce(self, announcements): + lp = self.original.log("received %d announcements (v1)" % + len(announcements)) + anns_v1 = set([convert_announcement_v1_to_v2(ann_v1) + for ann_v1 in announcements]) + return self.original.got_announcements(anns_v1, lp) + + def remote_set_encoding_parameters(self, parameters): + self.original.remote_set_encoding_parameters(parameters) + +class RIStubClient(RemoteInterface): # for_v1 + """Each client publishes a service announcement for a dummy object called + the StubClient. This object doesn't actually offer any services, but the + announcement helps the Introducer keep track of which clients are + subscribed (so the grid admin can keep track of things like the size of + the grid and the client versions in use. 
This is the (empty) + RemoteInterface for the StubClient.""" + +class StubClient(Referenceable): # for_v1 + implements(RIStubClient) + +V1 = "http://allmydata.org/tahoe/protocols/introducer/v1" +V2 = "http://allmydata.org/tahoe/protocols/introducer/v2" class IntroducerClient(service.Service, Referenceable): - implements(RIIntroducerSubscriberClient, IIntroducerClient) + implements(RIIntroducerSubscriberClient_v2, IIntroducerClient) def __init__(self, tub, introducer_furl, - nickname, my_version, oldest_supported): + nickname, my_version, oldest_supported, + app_versions, sequencer): self._tub = tub self.introducer_furl = introducer_furl assert type(nickname) is unicode - self._nickname_utf8 = nickname.encode("utf-8") # we always send UTF-8 + self._nickname = nickname self._my_version = my_version self._oldest_supported = oldest_supported + self._app_versions = app_versions + self._sequencer = sequencer - self._published_announcements = set() + self._my_subscriber_info = { "version": 0, + "nickname": self._nickname, + "app-versions": self._app_versions, + "my-version": self._my_version, + "oldest-supported": self._oldest_supported, + } + self._stub_client = None # for_v1 + self._stub_client_furl = None + + self._outbound_announcements = {} # not signed + self._published_announcements = {} # signed + self._canary = Referenceable() self._publisher = None @@ -31,13 +80,14 @@ self._subscribed_service_names = set() self._subscriptions = set() # requests we've actually sent - # _current_announcements remembers one announcement per + # _inbound_announcements remembers one announcement per # (servicename,serverid) pair. Anything that arrives with the same - # pair will displace the previous one. This stores unpacked - # announcement dictionaries, which can be compared for equality to - # distinguish re-announcement from updates. It also provides memory - # for clients who subscribe after startup. - self._current_announcements = {} + # pair will displace the previous one. This stores tuples of + # (unpacked announcement dictionary, verifyingkey, rxtime). The ann + # dicts can be compared for equality to distinguish re-announcement + # from updates. It also provides memory for clients who subscribe + # after startup. 
+ self._inbound_announcements = {} self.encoding_parameters = None @@ -51,6 +101,11 @@ "new_announcement": 0, "outbound_message": 0, } + self._debug_outstanding = 0 + + def _debug_retired(self, res): + self._debug_outstanding -= 1 + return res def startService(self): service.Service.startService(self) @@ -79,10 +134,9 @@ def _got_versioned_introducer(self, publisher): self.log("got introducer version: %s" % (publisher.version,)) - # we require a V1 introducer - needed = "http://allmydata.org/tahoe/protocols/introducer/v1" - if needed not in publisher.version: - raise InsufficientVersionError(needed, publisher.version) + # we require an introducer that speaks at least one of (V1, V2) + if not (V1 in publisher.version or V2 in publisher.version): + raise InsufficientVersionError("V1 or V2", publisher.version) self._publisher = publisher publisher.notifyOnDisconnect(self._disconnected) self._maybe_publish() @@ -95,24 +149,17 @@ def log(self, *args, **kwargs): if "facility" not in kwargs: - kwargs["facility"] = "tahoe.introducer" + kwargs["facility"] = "tahoe.introducer.client" return log.msg(*args, **kwargs) - - def publish(self, furl, service_name, remoteinterface_name): - assert type(self._nickname_utf8) is str # we always send UTF-8 - ann = (furl, service_name, remoteinterface_name, - self._nickname_utf8, self._my_version, self._oldest_supported) - self._published_announcements.add(ann) - self._maybe_publish() - def subscribe_to(self, service_name, cb, *args, **kwargs): self._local_subscribers.append( (service_name,cb,args,kwargs) ) self._subscribed_service_names.add(service_name) self._maybe_subscribe() - for (servicename,nodeid),ann_d in self._current_announcements.items(): + for index,(ann,key_s,when) in self._inbound_announcements.items(): + servicename = index[0] if servicename == service_name: - eventually(cb, nodeid, ann_d) + eventually(cb, key_s, ann, *args, **kwargs) def _maybe_subscribe(self): if not self._publisher: @@ -120,96 +167,192 @@ level=log.NOISY) return for service_name in self._subscribed_service_names: - if service_name not in self._subscriptions: - # there is a race here, but the subscription desk ignores - # duplicate requests. - self._subscriptions.add(service_name) - d = self._publisher.callRemote("subscribe", self, service_name) - d.addErrback(trap_deadref) - d.addErrback(log.err, format="server errored during subscribe", - facility="tahoe.introducer", - level=log.WEIRD, umid="2uMScQ") + if service_name in self._subscriptions: + continue + self._subscriptions.add(service_name) + if V2 in self._publisher.version: + self._debug_outstanding += 1 + d = self._publisher.callRemote("subscribe_v2", + self, service_name, + self._my_subscriber_info) + d.addBoth(self._debug_retired) + else: + d = self._subscribe_handle_v1(service_name) # for_v1 + d.addErrback(log.err, facility="tahoe.introducer.client", + level=log.WEIRD, umid="2uMScQ") + + def _subscribe_handle_v1(self, service_name): # for_v1 + # they don't speak V2: must be a v1 introducer. Fall back to the v1 + # 'subscribe' method, using a client adapter. + ca = WrapV2ClientInV1Interface(self) + self._debug_outstanding += 1 + d = self._publisher.callRemote("subscribe", ca, service_name) + d.addBoth(self._debug_retired) + # We must also publish an empty 'stub_client' object, so the + # introducer can count how many clients are connected and see what + # versions they're running. 
+ if not self._stub_client_furl: + self._stub_client = sc = StubClient() + self._stub_client_furl = self._tub.registerReference(sc) + def _publish_stub_client(ignored): + furl = self._stub_client_furl + self.publish("stub_client", + { "anonymous-storage-FURL": furl, + "permutation-seed-base32": get_tubid_string(furl), + }) + d.addCallback(_publish_stub_client) + return d + + def create_announcement_dict(self, service_name, ann): + ann_d = { "version": 0, + # "seqnum" and "nonce" will be populated with new values in + # publish(), each time we make a change + "nickname": self._nickname, + "app-versions": self._app_versions, + "my-version": self._my_version, + "oldest-supported": self._oldest_supported, + + "service-name": service_name, + } + ann_d.update(ann) + return ann_d + + def publish(self, service_name, ann, signing_key=None): + # we increment the seqnum every time we publish something new + current_seqnum, current_nonce = self._sequencer() + + ann_d = self.create_announcement_dict(service_name, ann) + self._outbound_announcements[service_name] = ann_d + + # publish all announcements with the new seqnum and nonce + for service_name,ann_d in self._outbound_announcements.items(): + ann_d["seqnum"] = current_seqnum + ann_d["nonce"] = current_nonce + ann_t = sign_to_foolscap(ann_d, signing_key) + self._published_announcements[service_name] = ann_t + self._maybe_publish() def _maybe_publish(self): if not self._publisher: self.log("want to publish, but no introducer yet", level=log.NOISY) return # this re-publishes everything. The Introducer ignores duplicates - for ann in self._published_announcements: + for ann_t in self._published_announcements.values(): self._debug_counts["outbound_message"] += 1 - d = self._publisher.callRemote("publish", ann) - d.addErrback(trap_deadref) - d.addErrback(log.err, - format="server errored during publish %(ann)s", - ann=ann, facility="tahoe.introducer", + if V2 in self._publisher.version: + self._debug_outstanding += 1 + d = self._publisher.callRemote("publish_v2", ann_t, + self._canary) + d.addBoth(self._debug_retired) + else: + d = self._handle_v1_publisher(ann_t) # for_v1 + d.addErrback(log.err, ann_t=ann_t, + facility="tahoe.introducer.client", level=log.WEIRD, umid="xs9pVQ") + def _handle_v1_publisher(self, ann_t): # for_v1 + # they don't speak V2, so fall back to the old 'publish' method + # (which takes an unsigned tuple of bytestrings) + self.log("falling back to publish_v1", + level=log.UNUSUAL, umid="9RCT1A") + ann_v1 = convert_announcement_v2_to_v1(ann_t) + self._debug_outstanding += 1 + d = self._publisher.callRemote("publish", ann_v1) + d.addBoth(self._debug_retired) + return d + + + def remote_announce_v2(self, announcements): + lp = self.log("received %d announcements (v2)" % len(announcements)) + return self.got_announcements(announcements, lp) - - def remote_announce(self, announcements): - self.log("received %d announcements" % len(announcements)) + def got_announcements(self, announcements, lp=None): + # this is the common entry point for both v1 and v2 announcements self._debug_counts["inbound_message"] += 1 - for ann in announcements: + for ann_t in announcements: try: - self._process_announcement(ann) - except: - log.err(format="unable to process announcement %(ann)s", - ann=ann) - # Don't let a corrupt announcement prevent us from processing - # the remaining ones. Don't return an error to the server, - # since they'd just ignore it anyways. 
- pass + # this might raise UnknownKeyError or bad-sig error + ann, key_s = unsign_from_foolscap(ann_t) + # key is "v0-base32abc123" + except BadSignatureError: + self.log("bad signature on inbound announcement: %s" % (ann_t,), + parent=lp, level=log.WEIRD, umid="ZAU15Q") + # process other announcements that arrived with the bad one + continue + + self._process_announcement(ann, key_s) - def _process_announcement(self, ann): + def _process_announcement(self, ann, key_s): self._debug_counts["inbound_announcement"] += 1 - (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann + service_name = str(ann["service-name"]) if service_name not in self._subscribed_service_names: self.log("announcement for a service we don't care about [%s]" % (service_name,), level=log.UNUSUAL, umid="dIpGNA") self._debug_counts["wrong_service"] += 1 return - self.log("announcement for [%s]: %s" % (service_name, ann), - umid="BoKEag") - assert type(furl) is str - assert type(service_name) is str - assert type(ri_name) is str - assert type(nickname_utf8) is str - nickname = nickname_utf8.decode("utf-8") - assert type(nickname) is unicode - assert type(ver) is str - assert type(oldest) is str - - nodeid = b32decode(SturdyRef(furl).tubID.upper()) - nodeid_s = idlib.shortnodeid_b2a(nodeid) - - ann_d = { "version": 0, - "service-name": service_name, - - "FURL": furl, - "nickname": nickname, - "app-versions": {}, # need #466 and v2 introducer - "my-version": ver, - "oldest-supported": oldest, - } - - index = (service_name, nodeid) - if self._current_announcements.get(index, None) == ann_d: - self.log("reannouncement for [%(service)s]:%(nodeid)s, ignoring", - service=service_name, nodeid=nodeid_s, - level=log.UNUSUAL, umid="B1MIdA") + # for ASCII values, simplejson might give us unicode *or* bytes + if "nickname" in ann and isinstance(ann["nickname"], str): + ann["nickname"] = unicode(ann["nickname"]) + nick_s = ann.get("nickname",u"").encode("utf-8") + lp2 = self.log(format="announcement for nickname '%(nick)s', service=%(svc)s: %(ann)s", + nick=nick_s, svc=service_name, ann=ann, umid="BoKEag") + + # how do we describe this node in the logs? + desc_bits = [] + if key_s: + desc_bits.append("serverid=" + key_s[:20]) + if "anonymous-storage-FURL" in ann: + tubid_s = get_tubid_string_from_ann(ann) + desc_bits.append("tubid=" + tubid_s[:8]) + description = "/".join(desc_bits) + + # the index is used to track duplicates + index = make_index(ann, key_s) + + # is this announcement a duplicate? + if (index in self._inbound_announcements + and self._inbound_announcements[index][0] == ann): + self.log(format="reannouncement for [%(service)s]:%(description)s, ignoring", + service=service_name, description=description, + parent=lp2, level=log.UNUSUAL, umid="B1MIdA") self._debug_counts["duplicate_announcement"] += 1 return - if index in self._current_announcements: + + # does it update an existing one? + if index in self._inbound_announcements: + old,_,_ = self._inbound_announcements[index] + if "seqnum" in old: + # must beat previous sequence number to replace + if ("seqnum" not in ann + or not isinstance(ann["seqnum"], (int,long))): + self.log("not replacing old announcement, no valid seqnum: %s" + % (ann,), + parent=lp2, level=log.NOISY, umid="zFGH3Q") + return + if ann["seqnum"] <= old["seqnum"]: + # note that exact replays are caught earlier, by + # comparing the entire signed announcement. 
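
The seqnum gate above is the protocol's replay defence: per (service, server) index, only a strictly newer sequence number may displace the cached announcement. A toy model of the rule (not the client's actual code, which also falls back to a tubid-based index for unsigned announcements):

    def accept_announcement(cache, ann, key_s):
        index = (ann["service-name"], key_s)
        old = cache.get(index)
        if old is not None and ann.get("seqnum", 0) <= old.get("seqnum", 0):
            return False          # duplicate or replay: ignore
        cache[index] = ann
        return True
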
+ self.log("not replacing old announcement, " + "new seqnum is too old (%s <= %s) " + "(replay attack?): %s" + % (ann["seqnum"], old["seqnum"], ann), + parent=lp2, level=log.UNUSUAL, umid="JAAAoQ") + return + # ok, seqnum is newer, allow replacement self._debug_counts["update"] += 1 + self.log("replacing old announcement: %s" % (ann,), + parent=lp2, level=log.NOISY, umid="wxwgIQ") else: self._debug_counts["new_announcement"] += 1 + self.log("new announcement[%s]" % service_name, + parent=lp2, level=log.NOISY) - self._current_announcements[index] = ann_d + self._inbound_announcements[index] = (ann, key_s, time.time()) # note: we never forget an index, but we might update its value for (service_name2,cb,args,kwargs) in self._local_subscribers: if service_name2 == service_name: - eventually(cb, nodeid, ann_d, *args, **kwargs) + eventually(cb, key_s, ann, *args, **kwargs) def remote_set_encoding_parameters(self, parameters): self.encoding_parameters = parameters diff -Nru tahoe-lafs-1.9.2/src/allmydata/introducer/common.py tahoe-lafs-1.10.0/src/allmydata/introducer/common.py --- tahoe-lafs-1.9.2/src/allmydata/introducer/common.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/introducer/common.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,155 @@ + +import re, simplejson +from allmydata.util import keyutil, base32, rrefutil + +def make_index(ann, key_s): + """Return something that can be used as an index (e.g. a tuple of + strings), such that two messages that refer to the same 'thing' will have + the same index. This is a tuple of (service-name, signing-key, None) for + signed announcements, or (service-name, None, tubid_s) for unsigned + announcements.""" + + service_name = str(ann["service-name"]) + if key_s: + return (service_name, key_s, None) + else: + tubid_s = get_tubid_string_from_ann(ann) + return (service_name, None, tubid_s) + +def get_tubid_string_from_ann(ann): + return get_tubid_string(str(ann.get("anonymous-storage-FURL") + or ann.get("FURL"))) + +def get_tubid_string(furl): + m = re.match(r'pb://(\w+)@', furl) + assert m + return m.group(1).lower() + +def convert_announcement_v1_to_v2(ann_t): + (furl, service_name, ri_name, nickname, ver, oldest) = ann_t + assert type(furl) is str + assert type(service_name) is str + # ignore ri_name + assert type(nickname) is str + assert type(ver) is str + assert type(oldest) is str + ann = {"version": 0, + "nickname": nickname.decode("utf-8", "replace"), + "app-versions": {}, + "my-version": ver, + "oldest-supported": oldest, + + "service-name": service_name, + "anonymous-storage-FURL": furl, + "permutation-seed-base32": get_tubid_string(furl), + } + msg = simplejson.dumps(ann).encode("utf-8") + return (msg, None, None) + +def convert_announcement_v2_to_v1(ann_v2): + (msg, sig, pubkey) = ann_v2 + ann = simplejson.loads(msg) + assert ann["version"] == 0 + ann_t = (str(ann["anonymous-storage-FURL"]), + str(ann["service-name"]), + "remoteinterface-name is unused", + ann["nickname"].encode("utf-8"), + str(ann["my-version"]), + str(ann["oldest-supported"]), + ) + return ann_t + + +def sign_to_foolscap(ann, sk): + # return (bytes, None, None) or (bytes, sig-str, pubkey-str). A future + # HTTP-based serialization will use JSON({msg:b64(JSON(msg).utf8), + # sig:v0-b64(sig), pubkey:v0-b64(pubkey)}) . 
+    msg = simplejson.dumps(ann).encode("utf-8")
+    if sk:
+        sig = "v0-"+base32.b2a(sk.sign(msg))
+        vk_bytes = sk.get_verifying_key_bytes()
+        ann_t = (msg, sig, "v0-"+base32.b2a(vk_bytes))
+    else:
+        ann_t = (msg, None, None)
+    return ann_t
+
+class UnknownKeyError(Exception):
+    pass
+
+def unsign_from_foolscap(ann_t):
+    (msg, sig_vs, claimed_key_vs) = ann_t
+    key_vs = None
+    if sig_vs and claimed_key_vs:
+        if not sig_vs.startswith("v0-"):
+            raise UnknownKeyError("only v0- signatures recognized")
+        if not claimed_key_vs.startswith("v0-"):
+            raise UnknownKeyError("only v0- keys recognized")
+        claimed_key = keyutil.parse_pubkey("pub-"+claimed_key_vs)
+        sig_bytes = base32.a2b(keyutil.remove_prefix(sig_vs, "v0-"))
+        claimed_key.verify(sig_bytes, msg)
+        key_vs = claimed_key_vs
+    ann = simplejson.loads(msg.decode("utf-8"))
+    return (ann, key_vs)
+
+class SubscriberDescriptor:
+    """This describes a subscriber, for status display purposes. It contains
+    the following attributes:
+
+    .service_name: what they subscribed to (string)
+    .when: time when they subscribed (seconds since epoch)
+    .nickname: their self-provided nickname, or "?" (unicode)
+    .version: their self-provided version (string)
+    .app_versions: versions of each library they use (dict str->str)
+    .advertised_addresses: what hosts they listen on (list of strings)
+    .remote_address: the external address from which they connected (string)
+    .tubid: for subscribers connecting with Foolscap, their tubid (string)
+    """
+
+    def __init__(self, service_name, when,
+                 nickname, version, app_versions,
+                 advertised_addresses, remote_address, tubid):
+        self.service_name = service_name
+        self.when = when
+        self.nickname = nickname
+        self.version = version
+        self.app_versions = app_versions
+        self.advertised_addresses = advertised_addresses
+        self.remote_address = remote_address
+        self.tubid = tubid
+
+class AnnouncementDescriptor:
+    """This describes an announcement, for status display purposes. It
+    contains the following attributes, which will be empty ("" for
+    strings) if the client did not provide them:
+
+    .when: time the announcement was first received (seconds since epoch)
+    .index: the announcement's 'index', a tuple of (string-or-None).
+            The server remembers one announcement per index.
+    .canary: a Referenceable on the announcer, so the server can learn
+             when they disconnect (for the status display)
+    .announcement: raw dictionary of announcement data
+    .service_name: which service they are announcing (string)
+    .version: 'my-version' portion of announcement (string)
+    .nickname: their self-provided nickname, or "" (unicode)
+    .serverid: the server identifier. This is a pubkey (for V2 clients),
+               or a tubid (for V1 clients).
+    .advertised_addresses: which hosts they listen on (list of strings)
+                           if the announcement included a key for
+                           'anonymous-storage-FURL', else an empty list.
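
A round-trip sketch for ``sign_to_foolscap``/``unsign_from_foolscap``, assuming ``keyutil.make_keypair()`` and ``keyutil.parse_privkey()`` (the key helpers this release uses elsewhere); the FURL is a dummy value:

    from allmydata.util import keyutil

    privkey_vs, pubkey_vs = keyutil.make_keypair()
    sk, _ = keyutil.parse_privkey(privkey_vs)
    ann = {"version": 0,
           "service-name": "storage",
           "anonymous-storage-FURL": "pb://abcdefgh@example.org:1234/swissnum"}
    ann_t = sign_to_foolscap(ann, sk)          # (msg, "v0-<sig>", "v0-<pubkey>")
    ann2, key_s = unsign_from_foolscap(ann_t)  # verifies before trusting
    assert key_s.startswith("v0-")
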
+ """ + + def __init__(self, when, index, canary, ann_d): + self.when = when + self.index = index + self.canary = canary + self.announcement = ann_d + self.service_name = ann_d["service-name"] + self.version = ann_d.get("my-version", "") + self.nickname = ann_d.get("nickname", u"") + (service_name, key_s, tubid_s) = index + self.serverid = key_s or tubid_s + furl = ann_d.get("anonymous-storage-FURL") + if furl: + self.advertised_addresses = rrefutil.hosts_for_furl(furl) + else: + self.advertised_addresses = [] diff -Nru tahoe-lafs-1.9.2/src/allmydata/introducer/interfaces.py tahoe-lafs-1.10.0/src/allmydata/introducer/interfaces.py --- tahoe-lafs-1.9.2/src/allmydata/introducer/interfaces.py 2012-05-14 02:07:22.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/introducer/interfaces.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,9 +1,12 @@ from zope.interface import Interface from foolscap.api import StringConstraint, TupleOf, SetOf, DictOf, Any, \ - RemoteInterface + RemoteInterface, Referenceable +from old import RIIntroducerSubscriberClient_v1 FURL = StringConstraint(1000) +# old introducer protocol (v1): +# # Announcements are (FURL, service_name, remoteinterface_name, # nickname, my_version, oldest_supported) # the (FURL, service_name, remoteinterface_name) refer to the service being @@ -14,13 +17,17 @@ # incompatible peer. The second goal is to enable the development of # backwards-compatibility code. -Announcement = TupleOf(FURL, str, str, - str, str, str) +Announcement_v1 = TupleOf(FURL, str, str, + str, str, str) -class RIIntroducerSubscriberClient(RemoteInterface): - __remote_name__ = "RIIntroducerSubscriberClient.tahoe.allmydata.com" +# v2 protocol over foolscap: Announcements are 3-tuples of (bytes, str, str) +# or (bytes, none, none) +Announcement_v2 = Any() - def announce(announcements=SetOf(Announcement)): +class RIIntroducerSubscriberClient_v2(RemoteInterface): + __remote_name__ = "RIIntroducerSubscriberClient_v2.tahoe.allmydata.com" + + def announce_v2(announcements=SetOf(Announcement_v2)): """I accept announcements from the publisher.""" return None @@ -41,38 +48,29 @@ """ return None -# When Foolscap can handle multiple interfaces (Foolscap#17), the -# full-powered introducer will implement both RIIntroducerPublisher and -# RIIntroducerSubscriberService. Until then, we define -# RIIntroducerPublisherAndSubscriberService as a combination of the two, and -# make everybody use that. +SubscriberInfo = DictOf(str, Any()) -class RIIntroducerPublisher(RemoteInterface): +class RIIntroducerPublisherAndSubscriberService_v2(RemoteInterface): """To publish a service to the world, connect to me and give me your - announcement message. I will deliver a copy to all connected subscribers.""" - __remote_name__ = "RIIntroducerPublisher.tahoe.allmydata.com" - - def publish(announcement=Announcement): - # canary? - return None - -class RIIntroducerSubscriberService(RemoteInterface): - __remote_name__ = "RIIntroducerSubscriberService.tahoe.allmydata.com" - - def subscribe(subscriber=RIIntroducerSubscriberClient, service_name=str): - """Give me a subscriber reference, and I will call its new_peers() - method will any announcements that match the desired service name. I - will ignore duplicate subscriptions. - """ - return None - -class RIIntroducerPublisherAndSubscriberService(RemoteInterface): - __remote_name__ = "RIIntroducerPublisherAndSubscriberService.tahoe.allmydata.com" + announcement message. I will deliver a copy to all connected subscribers. 
+ To hear about services, connect to me and subscribe to a specific + service_name.""" + __remote_name__ = "RIIntroducerPublisherAndSubscriberService_v2.tahoe.allmydata.com" def get_version(): return DictOf(str, Any()) - def publish(announcement=Announcement): + def publish(announcement=Announcement_v1): return None - def subscribe(subscriber=RIIntroducerSubscriberClient, service_name=str): + def publish_v2(announcement=Announcement_v2, canary=Referenceable): + return None + def subscribe(subscriber=RIIntroducerSubscriberClient_v1, service_name=str): + return None + def subscribe_v2(subscriber=RIIntroducerSubscriberClient_v2, + service_name=str, subscriber_info=SubscriberInfo): + """Give me a subscriber reference, and I will call its announce_v2() + method with any announcements that match the desired service name. I + will ignore duplicate subscriptions. The subscriber_info dictionary + tells me about the subscriber, and is used for diagnostic/status + displays.""" return None class IIntroducerClient(Interface): @@ -80,41 +78,47 @@ publish their services to the rest of the world, and I help them learn about services available on other nodes.""" - def publish(furl, service_name, remoteinterface_name): - """Once you call this, I will tell the world that the Referenceable - available at FURL is available to provide a service named - SERVICE_NAME. The precise definition of the service being provided is - identified by the Foolscap 'remote interface name' in the last - parameter: this is supposed to be a globally-unique string that - identifies the RemoteInterface that is implemented.""" + def publish(service_name, ann, signing_key=None): + """Publish the given announcement dictionary (which must be + JSON-serializable), plus some additional keys, to the world. + + Each announcement is characterized by a (service_name, serverid) + pair. When the server sees two announcements with the same pair, the + later one will replace the earlier one. The serverid is derived from + the signing_key, if present, otherwise it is derived from the + 'anonymous-storage-FURL' key. + + If signing_key= is set to an instance of SigningKey, it will be + used to sign the announcement.""" def subscribe_to(service_name, callback, *args, **kwargs): """Call this if you will eventually want to use services with the given SERVICE_NAME. This will prompt me to subscribe to announcements of those services. Your callback will be invoked with at least two - arguments: a serverid (binary string), and an announcement - dictionary, followed by any additional callback args/kwargs you give - me. I will run your callback for both new announcements and for + arguments: a pubkey and an announcement dictionary, followed by any + additional callback args/kwargs you gave me. The pubkey will be None + unless the announcement was signed by the corresponding pubkey, in + which case it will be a printable string like 'v0-base32..'. + + I will run your callback for both new announcements and for announcements that have changed, but you must be prepared to tolerate duplicates. - The announcement dictionary that I give you will have the following - keys: + The announcement that I give you comes from some other client. 
It + will be a JSON-serializable dictionary which (by convention) is + expected to have at least the following keys: version: 0 - service-name: str('storage') - - FURL: str(furl) - remoteinterface-name: str(ri_name) nickname: unicode app-versions: {} my-version: str oldest-supported: str - Note that app-version will be an empty dictionary until #466 is done - and both the introducer and the remote client have been upgraded. For - current (native) server types, the serverid will always be equal to - the binary form of the FURL's tubid. + service-name: str('storage') + anonymous-storage-FURL: str(furl) + + Note that app-version will be an empty dictionary if either the + publishing client or the Introducer are running older code. """ def connected_to_introducer(): diff -Nru tahoe-lafs-1.9.2/src/allmydata/introducer/old.py tahoe-lafs-1.10.0/src/allmydata/introducer/old.py --- tahoe-lafs-1.9.2/src/allmydata/introducer/old.py 1970-01-01 00:00:00.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/introducer/old.py 2013-09-03 15:38:27.000000000 +0000 @@ -0,0 +1,493 @@ + +import time +from base64 import b32decode +from zope.interface import implements, Interface +from twisted.application import service +import allmydata +from allmydata.interfaces import InsufficientVersionError +from allmydata.util import log, idlib, rrefutil +from foolscap.api import StringConstraint, TupleOf, SetOf, DictOf, Any, \ + RemoteInterface, Referenceable, eventually, SturdyRef +from allmydata.introducer.common import SubscriberDescriptor, \ + AnnouncementDescriptor +FURL = StringConstraint(1000) + +# We keep a copy of the old introducer (both client and server) here to +# support compatibility tests. The old client is supposed to handle the new +# server, and new client is supposed to handle the old server. + + +# Announcements are (FURL, service_name, remoteinterface_name, +# nickname, my_version, oldest_supported) +# the (FURL, service_name, remoteinterface_name) refer to the service being +# announced. The (nickname, my_version, oldest_supported) refer to the +# client as a whole. The my_version/oldest_supported strings can be parsed +# by an allmydata.util.version.Version instance, and then compared. The +# first goal is to make sure that nodes are not confused by speaking to an +# incompatible peer. The second goal is to enable the development of +# backwards-compatibility code. + +Announcement = TupleOf(FURL, str, str, + str, str, str) + +class RIIntroducerSubscriberClient_v1(RemoteInterface): + __remote_name__ = "RIIntroducerSubscriberClient.tahoe.allmydata.com" + + def announce(announcements=SetOf(Announcement)): + """I accept announcements from the publisher.""" + return None + + def set_encoding_parameters(parameters=(int, int, int)): + """Advise the client of the recommended k-of-n encoding parameters + for this grid. 'parameters' is a tuple of (k, desired, n), where 'n' + is the total number of shares that will be created for any given + file, while 'k' is the number of shares that must be retrieved to + recover that file, and 'desired' is the minimum number of shares that + must be placed before the uploader will consider its job a success. + n/k is the expansion ratio, while k determines the robustness. + + Introducers should specify 'n' according to the expected size of the + grid (there is no point to producing more shares than there are + peers), and k according to the desired reliability-vs-overhead goals. + + Note that setting k=1 is equivalent to simple replication. 
+ """ + return None + +# When Foolscap can handle multiple interfaces (Foolscap#17), the +# full-powered introducer will implement both RIIntroducerPublisher and +# RIIntroducerSubscriberService. Until then, we define +# RIIntroducerPublisherAndSubscriberService as a combination of the two, and +# make everybody use that. + +class RIIntroducerPublisher_v1(RemoteInterface): + """To publish a service to the world, connect to me and give me your + announcement message. I will deliver a copy to all connected subscribers.""" + __remote_name__ = "RIIntroducerPublisher.tahoe.allmydata.com" + + def publish(announcement=Announcement): + # canary? + return None + +class RIIntroducerSubscriberService_v1(RemoteInterface): + __remote_name__ = "RIIntroducerSubscriberService.tahoe.allmydata.com" + + def subscribe(subscriber=RIIntroducerSubscriberClient_v1, service_name=str): + """Give me a subscriber reference, and I will call its new_peers() + method will any announcements that match the desired service name. I + will ignore duplicate subscriptions. + """ + return None + +class RIIntroducerPublisherAndSubscriberService_v1(RemoteInterface): + __remote_name__ = "RIIntroducerPublisherAndSubscriberService.tahoe.allmydata.com" + def get_version(): + return DictOf(str, Any()) + def publish(announcement=Announcement): + return None + def subscribe(subscriber=RIIntroducerSubscriberClient_v1, service_name=str): + return None + +class IIntroducerClient(Interface): + """I provide service introduction facilities for a node. I help nodes + publish their services to the rest of the world, and I help them learn + about services available on other nodes.""" + + def publish(furl, service_name, remoteinterface_name): + """Once you call this, I will tell the world that the Referenceable + available at FURL is available to provide a service named + SERVICE_NAME. The precise definition of the service being provided is + identified by the Foolscap 'remote interface name' in the last + parameter: this is supposed to be a globally-unique string that + identifies the RemoteInterface that is implemented.""" + + def subscribe_to(service_name, callback, *args, **kwargs): + """Call this if you will eventually want to use services with the + given SERVICE_NAME. This will prompt me to subscribe to announcements + of those services. Your callback will be invoked with at least two + arguments: a serverid (binary string), and an announcement + dictionary, followed by any additional callback args/kwargs you give + me. I will run your callback for both new announcements and for + announcements that have changed, but you must be prepared to tolerate + duplicates. + + The announcement dictionary that I give you will have the following + keys: + + version: 0 + service-name: str('storage') + + FURL: str(furl) + remoteinterface-name: str(ri_name) + nickname: unicode + app-versions: {} + my-version: str + oldest-supported: str + + Note that app-version will be an empty dictionary until #466 is done + and both the introducer and the remote client have been upgraded. For + current (native) server types, the serverid will always be equal to + the binary form of the FURL's tubid. 
+ """ + + def connected_to_introducer(): + """Returns a boolean, True if we are currently connected to the + introducer, False if not.""" + + +class IntroducerClient_v1(service.Service, Referenceable): + implements(RIIntroducerSubscriberClient_v1, IIntroducerClient) + + def __init__(self, tub, introducer_furl, + nickname, my_version, oldest_supported): + self._tub = tub + self.introducer_furl = introducer_furl + + assert type(nickname) is unicode + self._nickname_utf8 = nickname.encode("utf-8") # we always send UTF-8 + self._my_version = my_version + self._oldest_supported = oldest_supported + + self._published_announcements = set() + + self._publisher = None + + self._local_subscribers = [] # (servicename,cb,args,kwargs) tuples + self._subscribed_service_names = set() + self._subscriptions = set() # requests we've actually sent + + # _current_announcements remembers one announcement per + # (servicename,serverid) pair. Anything that arrives with the same + # pair will displace the previous one. This stores unpacked + # announcement dictionaries, which can be compared for equality to + # distinguish re-announcement from updates. It also provides memory + # for clients who subscribe after startup. + self._current_announcements = {} + + self.encoding_parameters = None + + # hooks for unit tests + self._debug_counts = { + "inbound_message": 0, + "inbound_announcement": 0, + "wrong_service": 0, + "duplicate_announcement": 0, + "update": 0, + "new_announcement": 0, + "outbound_message": 0, + } + self._debug_outstanding = 0 + + def _debug_retired(self, res): + self._debug_outstanding -= 1 + return res + + def startService(self): + service.Service.startService(self) + self._introducer_error = None + rc = self._tub.connectTo(self.introducer_furl, self._got_introducer) + self._introducer_reconnector = rc + def connect_failed(failure): + self.log("Initial Introducer connection failed: perhaps it's down", + level=log.WEIRD, failure=failure, umid="c5MqUQ") + d = self._tub.getReference(self.introducer_furl) + d.addErrback(connect_failed) + + def _got_introducer(self, publisher): + self.log("connected to introducer, getting versions") + default = { "http://allmydata.org/tahoe/protocols/introducer/v1": + { }, + "application-version": "unknown: no get_version()", + } + d = rrefutil.add_version_to_remote_reference(publisher, default) + d.addCallback(self._got_versioned_introducer) + d.addErrback(self._got_error) + + def _got_error(self, f): + # TODO: for the introducer, perhaps this should halt the application + self._introducer_error = f # polled by tests + + def _got_versioned_introducer(self, publisher): + self.log("got introducer version: %s" % (publisher.version,)) + # we require a V1 introducer + needed = "http://allmydata.org/tahoe/protocols/introducer/v1" + if needed not in publisher.version: + raise InsufficientVersionError(needed, publisher.version) + self._publisher = publisher + publisher.notifyOnDisconnect(self._disconnected) + self._maybe_publish() + self._maybe_subscribe() + + def _disconnected(self): + self.log("bummer, we've lost our connection to the introducer") + self._publisher = None + self._subscriptions.clear() + + def log(self, *args, **kwargs): + if "facility" not in kwargs: + kwargs["facility"] = "tahoe.introducer" + return log.msg(*args, **kwargs) + + + def publish(self, furl, service_name, remoteinterface_name): + assert type(self._nickname_utf8) is str # we always send UTF-8 + ann = (furl, service_name, remoteinterface_name, + self._nickname_utf8, self._my_version, 
self._oldest_supported) + self._published_announcements.add(ann) + self._maybe_publish() + + def subscribe_to(self, service_name, cb, *args, **kwargs): + self._local_subscribers.append( (service_name,cb,args,kwargs) ) + self._subscribed_service_names.add(service_name) + self._maybe_subscribe() + for (servicename,nodeid),ann_d in self._current_announcements.items(): + if servicename == service_name: + eventually(cb, nodeid, ann_d) + + def _maybe_subscribe(self): + if not self._publisher: + self.log("want to subscribe, but no introducer yet", + level=log.NOISY) + return + for service_name in self._subscribed_service_names: + if service_name not in self._subscriptions: + # there is a race here, but the subscription desk ignores + # duplicate requests. + self._subscriptions.add(service_name) + self._debug_outstanding += 1 + d = self._publisher.callRemote("subscribe", self, service_name) + d.addBoth(self._debug_retired) + d.addErrback(rrefutil.trap_deadref) + d.addErrback(log.err, format="server errored during subscribe", + facility="tahoe.introducer", + level=log.WEIRD, umid="2uMScQ") + + def _maybe_publish(self): + if not self._publisher: + self.log("want to publish, but no introducer yet", level=log.NOISY) + return + # this re-publishes everything. The Introducer ignores duplicates + for ann in self._published_announcements: + self._debug_counts["outbound_message"] += 1 + self._debug_outstanding += 1 + d = self._publisher.callRemote("publish", ann) + d.addBoth(self._debug_retired) + d.addErrback(rrefutil.trap_deadref) + d.addErrback(log.err, + format="server errored during publish %(ann)s", + ann=ann, facility="tahoe.introducer", + level=log.WEIRD, umid="xs9pVQ") + + + + def remote_announce(self, announcements): + self.log("received %d announcements" % len(announcements)) + self._debug_counts["inbound_message"] += 1 + for ann in announcements: + try: + self._process_announcement(ann) + except: + log.err(format="unable to process announcement %(ann)s", + ann=ann) + # Don't let a corrupt announcement prevent us from processing + # the remaining ones. Don't return an error to the server, + # since they'd just ignore it anyways. 
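Stepping back from the internals, a hedged sketch of how application code would drive this v1 client; the constructor arguments match IntroducerClient_v1.__init__ above, while tub, the FURL, the service parent, and the callback body are hypothetical:

    # 'tub' would be a started foolscap Tub; the FURL points at an introducer
    client = IntroducerClient_v1(tub, "pb://exampletubid@host:1234/intro",
                                 u"my-nick", "1.9.2", "1.0.0")

    def got_storage(nodeid, ann_d):
        # v1 callback convention: binary nodeid plus announcement dictionary
        print "storage server %s at %s" % (nodeid.encode("hex"), ann_d["FURL"])

    client.subscribe_to("storage", got_storage)
    client.setServiceParent(parent)  # start it under a twisted service parent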
+ pass + + def _process_announcement(self, ann): + self._debug_counts["inbound_announcement"] += 1 + (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann + if service_name not in self._subscribed_service_names: + self.log("announcement for a service we don't care about [%s]" + % (service_name,), level=log.UNUSUAL, umid="dIpGNA") + self._debug_counts["wrong_service"] += 1 + return + self.log("announcement for [%s]: %s" % (service_name, ann), + umid="BoKEag") + assert type(furl) is str + assert type(service_name) is str + assert type(ri_name) is str + assert type(nickname_utf8) is str + nickname = nickname_utf8.decode("utf-8") + assert type(nickname) is unicode + assert type(ver) is str + assert type(oldest) is str + + nodeid = b32decode(SturdyRef(furl).tubID.upper()) + nodeid_s = idlib.shortnodeid_b2a(nodeid) + + ann_d = { "version": 0, + "service-name": service_name, + + "FURL": furl, + "nickname": nickname, + "app-versions": {}, # need #466 and v2 introducer + "my-version": ver, + "oldest-supported": oldest, + } + + index = (service_name, nodeid) + if self._current_announcements.get(index, None) == ann_d: + self.log("reannouncement for [%(service)s]:%(nodeid)s, ignoring", + service=service_name, nodeid=nodeid_s, + level=log.UNUSUAL, umid="B1MIdA") + self._debug_counts["duplicate_announcement"] += 1 + return + if index in self._current_announcements: + self._debug_counts["update"] += 1 + else: + self._debug_counts["new_announcement"] += 1 + + self._current_announcements[index] = ann_d + # note: we never forget an index, but we might update its value + + for (service_name2,cb,args,kwargs) in self._local_subscribers: + if service_name2 == service_name: + eventually(cb, nodeid, ann_d, *args, **kwargs) + + def remote_set_encoding_parameters(self, parameters): + self.encoding_parameters = parameters + + def connected_to_introducer(self): + return bool(self._publisher) + +class IntroducerService_v1(service.MultiService, Referenceable): + implements(RIIntroducerPublisherAndSubscriberService_v1) + name = "introducer" + VERSION = { "http://allmydata.org/tahoe/protocols/introducer/v1": + { }, + "application-version": str(allmydata.__full_version__), + } + + def __init__(self, basedir="."): + service.MultiService.__init__(self) + self.introducer_url = None + # 'index' is (service_name, tubid) + self._announcements = {} # dict of index -> (announcement, timestamp) + self._subscribers = {} # [service_name]->[rref]->timestamp + self._debug_counts = {"inbound_message": 0, + "inbound_duplicate": 0, + "inbound_update": 0, + "outbound_message": 0, + "outbound_announcements": 0, + "inbound_subscribe": 0} + self._debug_outstanding = 0 + + def _debug_retired(self, res): + self._debug_outstanding -= 1 + return res + + def log(self, *args, **kwargs): + if "facility" not in kwargs: + kwargs["facility"] = "tahoe.introducer" + return log.msg(*args, **kwargs) + + def get_announcements(self, include_stub_clients=True): + announcements = [] + for index, (ann_t, when) in self._announcements.items(): + (furl, service_name, ri_name, nickname, ver, oldest) = ann_t + if service_name == "stub_client" and not include_stub_clients: + continue + ann_d = {"nickname": nickname.decode("utf-8", "replace"), + "my-version": ver, + "service-name": service_name, + "anonymous-storage-FURL": furl, + } + # the V2 introducer uses (service_name, key_s, tubid_s) as an + # index, so match that format for AnnouncementDescriptor + new_index = (index[0], None, idlib.nodeid_b2a(index[1])) + ad = AnnouncementDescriptor(when, new_index, 
None, ann_d) + announcements.append(ad) + return announcements + + def get_subscribers(self): + s = [] + for service_name, subscribers in self._subscribers.items(): + for rref, when in subscribers.items(): + tubid = rref.getRemoteTubID() or "?" + advertised_addresses = rrefutil.hosts_for_rref(rref) + remote_address = rrefutil.stringify_remote_address(rref) + nickname, version, app_versions = u"?", u"?", {} + sd = SubscriberDescriptor(service_name, when, + nickname, version, app_versions, + advertised_addresses, remote_address, + tubid) + s.append(sd) + return s + + def remote_get_version(self): + return self.VERSION + + def remote_publish(self, announcement): + try: + self._publish(announcement) + except: + log.err(format="Introducer.remote_publish failed on %(ann)s", + ann=announcement, level=log.UNUSUAL, umid="620rWA") + raise + + def _publish(self, announcement): + self._debug_counts["inbound_message"] += 1 + self.log("introducer: announcement published: %s" % (announcement,) ) + (furl, service_name, ri_name, nickname_utf8, ver, oldest) = announcement + #print "PUB", service_name, nickname_utf8 + + nodeid = b32decode(SturdyRef(furl).tubID.upper()) + index = (service_name, nodeid) + + if index in self._announcements: + (old_announcement, timestamp) = self._announcements[index] + if old_announcement == announcement: + self.log("but we already knew it, ignoring", level=log.NOISY) + self._debug_counts["inbound_duplicate"] += 1 + return + else: + self.log("old announcement being updated", level=log.NOISY) + self._debug_counts["inbound_update"] += 1 + self._announcements[index] = (announcement, time.time()) + + for s in self._subscribers.get(service_name, []): + self._debug_counts["outbound_message"] += 1 + self._debug_counts["outbound_announcements"] += 1 + self._debug_outstanding += 1 + d = s.callRemote("announce", set([announcement])) + d.addBoth(self._debug_retired) + d.addErrback(rrefutil.trap_deadref) + d.addErrback(log.err, + format="subscriber errored on announcement %(ann)s", + ann=announcement, facility="tahoe.introducer", + level=log.UNUSUAL, umid="jfGMXQ") + + def remote_subscribe(self, subscriber, service_name): + self.log("introducer: subscription[%s] request at %s" % (service_name, + subscriber)) + self._debug_counts["inbound_subscribe"] += 1 + if service_name not in self._subscribers: + self._subscribers[service_name] = {} + subscribers = self._subscribers[service_name] + if subscriber in subscribers: + self.log("but they're already subscribed, ignoring", + level=log.UNUSUAL) + return + subscribers[subscriber] = time.time() + def _remove(): + self.log("introducer: unsubscribing[%s] %s" % (service_name, + subscriber)) + subscribers.pop(subscriber, None) + subscriber.notifyOnDisconnect(_remove) + + announcements = set( + [ ann + for (sn2,nodeid),(ann,when) in self._announcements.items() + if sn2 == service_name] ) + + self._debug_counts["outbound_message"] += 1 + self._debug_counts["outbound_announcements"] += len(announcements) + self._debug_outstanding += 1 + d = subscriber.callRemote("announce", announcements) + d.addBoth(self._debug_retired) + d.addErrback(rrefutil.trap_deadref) + d.addErrback(log.err, + format="subscriber errored during subscribe %(anns)s", + anns=announcements, facility="tahoe.introducer", + level=log.UNUSUAL, umid="1XChxA") diff -Nru tahoe-lafs-1.9.2/src/allmydata/introducer/server.py tahoe-lafs-1.10.0/src/allmydata/introducer/server.py --- tahoe-lafs-1.9.2/src/allmydata/introducer/server.py 2012-05-14 03:01:09.000000000 +0000 +++ 
tahoe-lafs-1.10.0/src/allmydata/introducer/server.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,14 +1,20 @@ -import time, os.path -from base64 import b32decode +import time, os.path, textwrap from zope.interface import implements from twisted.application import service -from foolscap.api import Referenceable, SturdyRef +from foolscap.api import Referenceable import allmydata from allmydata import node from allmydata.util import log, rrefutil +from allmydata.util.encodingutil import get_filesystem_encoding from allmydata.introducer.interfaces import \ - RIIntroducerPublisherAndSubscriberService + RIIntroducerPublisherAndSubscriberService_v2 +from allmydata.introducer.common import convert_announcement_v1_to_v2, \ + convert_announcement_v2_to_v1, unsign_from_foolscap, make_index, \ + get_tubid_string_from_ann, SubscriberDescriptor, AnnouncementDescriptor + +class FurlFileConflictError(Exception): + pass class IntroducerNode(node.Node): PORTNUMFILE = "introducer.port" @@ -27,18 +33,33 @@ introducerservice = IntroducerService(self.basedir) self.add_service(introducerservice) + old_public_fn = os.path.join(self.basedir, "introducer.furl").encode(get_filesystem_encoding()) + private_fn = os.path.join(self.basedir, "private", "introducer.furl").encode(get_filesystem_encoding()) + + if os.path.exists(old_public_fn): + if os.path.exists(private_fn): + msg = """This directory (%s) contains both an old public + 'introducer.furl' file, and a new-style + 'private/introducer.furl', so I cannot safely remove the old + one. Please make sure your desired FURL is in + private/introducer.furl, and remove the public file. If this + causes your Introducer's FURL to change, you need to inform + all grid members so they can update their tahoe.cfg. + """ + raise FurlFileConflictError(textwrap.dedent(msg)) + os.rename(old_public_fn, private_fn) d = self.when_tub_ready() def _publish(res): - self.introducer_url = self.tub.registerReference(introducerservice, - "introducer") - self.log(" introducer is at %s" % self.introducer_url) - self.write_config("introducer.furl", self.introducer_url + "\n") + furl = self.tub.registerReference(introducerservice, + furlFile=private_fn) + self.log(" introducer is at %s" % furl, umid="qF2L9A") + self.introducer_url = furl # for tests d.addCallback(_publish) d.addErrback(log.err, facility="tahoe.init", level=log.BAD, umid="UaNs9A") def init_web(self, webport): - self.log("init_web(webport=%s)", args=(webport,)) + self.log("init_web(webport=%s)", args=(webport,), umid="2bUygA") from allmydata.webish import IntroducerWebishServer nodeurl_path = os.path.join(self.basedir, "node.url") @@ -47,105 +68,306 @@ ws = IntroducerWebishServer(self, webport, nodeurl_path, staticdir) self.add_service(ws) +class WrapV1SubscriberInV2Interface: # for_v1 + """I wrap a RemoteReference that points at an old v1 subscriber, enabling + it to be treated like a v2 subscriber. 
+ """ + + def __init__(self, original): + self.original = original # also used for tests + def __eq__(self, them): + return self.original == them + def __ne__(self, them): + return self.original != them + def __hash__(self): + return hash(self.original) + def getRemoteTubID(self): + return self.original.getRemoteTubID() + def getSturdyRef(self): + return self.original.getSturdyRef() + def getPeer(self): + return self.original.getPeer() + def getLocationHints(self): + return self.original.getLocationHints() + def callRemote(self, methname, *args, **kwargs): + m = getattr(self, "wrap_" + methname) + return m(*args, **kwargs) + def wrap_announce_v2(self, announcements): + anns_v1 = [convert_announcement_v2_to_v1(ann) for ann in announcements] + return self.original.callRemote("announce", set(anns_v1)) + def wrap_set_encoding_parameters(self, parameters): + # note: unused + return self.original.callRemote("set_encoding_parameters", parameters) + def notifyOnDisconnect(self, *args, **kwargs): + return self.original.notifyOnDisconnect(*args, **kwargs) + class IntroducerService(service.MultiService, Referenceable): - implements(RIIntroducerPublisherAndSubscriberService) + implements(RIIntroducerPublisherAndSubscriberService_v2) name = "introducer" - VERSION = { "http://allmydata.org/tahoe/protocols/introducer/v1": - { }, + # v1 is the original protocol, supported since 1.0 (but only advertised + # starting in 1.3). v2 is the new signed protocol, supported after 1.9 + VERSION = { "http://allmydata.org/tahoe/protocols/introducer/v1": { }, + "http://allmydata.org/tahoe/protocols/introducer/v2": { }, "application-version": str(allmydata.__full_version__), } def __init__(self, basedir="."): service.MultiService.__init__(self) self.introducer_url = None - # 'index' is (service_name, tubid) - self._announcements = {} # dict of index -> (announcement, timestamp) - self._subscribers = {} # dict of (rref->timestamp) dicts + # 'index' is (service_name, key_s, tubid), where key_s or tubid is + # None + self._announcements = {} # dict of index -> + # (ann_t, canary, ann, timestamp) + + # ann (the announcement dictionary) is cleaned up: nickname is always + # unicode, servicename is always ascii, etc, even though + # simplejson.loads sometimes returns either + + # self._subscribers is a dict mapping servicename to subscriptions + # 'subscriptions' is a dict mapping rref to a subscription + # 'subscription' is a tuple of (subscriber_info, timestamp) + # 'subscriber_info' is a dict, provided directly for v2 clients, or + # synthesized for v1 clients. The expected keys are: + # version, nickname, app-versions, my-version, oldest-supported + self._subscribers = {} + + # self._stub_client_announcements contains the information provided + # by v1 clients. We stash this so we can match it up with their + # subscriptions. 
+ self._stub_client_announcements = {} # maps tubid to sinfo # for_v1 + self._debug_counts = {"inbound_message": 0, "inbound_duplicate": 0, + "inbound_no_seqnum": 0, + "inbound_old_replay": 0, "inbound_update": 0, "outbound_message": 0, "outbound_announcements": 0, "inbound_subscribe": 0} + self._debug_outstanding = 0 # also covers WrapV1SubscriberInV2Interface + + def _debug_retired(self, res): + self._debug_outstanding -= 1 + return res def log(self, *args, **kwargs): if "facility" not in kwargs: - kwargs["facility"] = "tahoe.introducer" + kwargs["facility"] = "tahoe.introducer.server" return log.msg(*args, **kwargs) - def get_announcements(self): - return self._announcements + def get_announcements(self, include_stub_clients=True): + """Return a list of AnnouncementDescriptor for all announcements""" + announcements = [] + for (index, (_, canary, ann, when)) in self._announcements.items(): + if ann["service-name"] == "stub_client": + if not include_stub_clients: + continue + ad = AnnouncementDescriptor(when, index, canary, ann) + announcements.append(ad) + return announcements + def get_subscribers(self): - return self._subscribers + """Return a list of SubscriberDescriptor objects for all subscribers""" + s = [] + for service_name, subscriptions in self._subscribers.items(): + for rref,(subscriber_info,when) in subscriptions.items(): + # note that if the subscriber didn't do Tub.setLocation, + # tubid will be None. Also, subscribers do not tell us which + # pubkey they use; only publishers do that. + tubid = rref.getRemoteTubID() or "?" + advertised_addresses = rrefutil.hosts_for_rref(rref) + remote_address = rrefutil.stringify_remote_address(rref) + # these three assume subscriber_info["version"]==0, but + # should tolerate other versions + if not subscriber_info: + # V1 clients that haven't yet sent their stub_info data + subscriber_info = {} + nickname = subscriber_info.get("nickname", u"?") + version = subscriber_info.get("my-version", u"?") + app_versions = subscriber_info.get("app-versions", {}) + # 'when' is the time they subscribed + sd = SubscriberDescriptor(service_name, when, + nickname, version, app_versions, + advertised_addresses, remote_address, + tubid) + s.append(sd) + return s def remote_get_version(self): return self.VERSION - def remote_publish(self, announcement): + def remote_publish(self, ann_t): # for_v1 + lp = self.log("introducer: old (v1) announcement published: %s" + % (ann_t,), umid="6zGOIw") + ann_v2 = convert_announcement_v1_to_v2(ann_t) + return self.publish(ann_v2, None, lp) + + def remote_publish_v2(self, ann_t, canary): + lp = self.log("introducer: announcement (v2) published", umid="L2QXkQ") + return self.publish(ann_t, canary, lp) + + def publish(self, ann_t, canary, lp): try: - self._publish(announcement) + self._publish(ann_t, canary, lp) except: log.err(format="Introducer.remote_publish failed on %(ann)s", - ann=announcement, level=log.UNUSUAL, umid="620rWA") + ann=ann_t, + level=log.UNUSUAL, parent=lp, umid="620rWA") raise - def _publish(self, announcement): + def _publish(self, ann_t, canary, lp): self._debug_counts["inbound_message"] += 1 - self.log("introducer: announcement published: %s" % (announcement,) ) - (furl, service_name, ri_name, nickname_utf8, ver, oldest) = announcement - - nodeid = b32decode(SturdyRef(furl).tubID.upper()) - index = (service_name, nodeid) + self.log("introducer: announcement published: %s" % (ann_t,), + umid="wKHgCw") + ann, key = unsign_from_foolscap(ann_t) # might raise BadSignatureError + index = 
make_index(ann, key) + + service_name = str(ann["service-name"]) + if service_name == "stub_client": # for_v1 + self._attach_stub_client(ann, lp) + return - if index in self._announcements: - (old_announcement, timestamp) = self._announcements[index] - if old_announcement == announcement: - self.log("but we already knew it, ignoring", level=log.NOISY) + old = self._announcements.get(index) + if old: + (old_ann_t, canary, old_ann, timestamp) = old + if old_ann == ann: + self.log("but we already knew it, ignoring", level=log.NOISY, + umid="myxzLw") self._debug_counts["inbound_duplicate"] += 1 return else: - self.log("old announcement being updated", level=log.NOISY) + if "seqnum" in old_ann: + # must beat previous sequence number to replace + if ("seqnum" not in ann + or not isinstance(ann["seqnum"], (int,long))): + self.log("not replacing old ann, no valid seqnum", + level=log.NOISY, umid="ySbaVw") + self._debug_counts["inbound_no_seqnum"] += 1 + return + if ann["seqnum"] <= old_ann["seqnum"]: + self.log("not replacing old ann, new seqnum is too old" + " (%s <= %s) (replay attack?)" + % (ann["seqnum"], old_ann["seqnum"]), + level=log.UNUSUAL, umid="sX7yqQ") + self._debug_counts["inbound_old_replay"] += 1 + return + # ok, seqnum is newer, allow replacement + self.log("old announcement being updated", level=log.NOISY, + umid="304r9g") self._debug_counts["inbound_update"] += 1 - self._announcements[index] = (announcement, time.time()) + self._announcements[index] = (ann_t, canary, ann, time.time()) + #if canary: + # canary.notifyOnDisconnect ... + # use a CanaryWatcher? with cw.is_connected()? + # actually we just want foolscap to give rref.is_connected(), since + # this is only for the status display for s in self._subscribers.get(service_name, []): self._debug_counts["outbound_message"] += 1 self._debug_counts["outbound_announcements"] += 1 - d = s.callRemote("announce", set([announcement])) - d.addErrback(rrefutil.trap_deadref) + self._debug_outstanding += 1 + d = s.callRemote("announce_v2", set([ann_t])) + d.addBoth(self._debug_retired) d.addErrback(log.err, format="subscriber errored on announcement %(ann)s", - ann=announcement, facility="tahoe.introducer", + ann=ann_t, facility="tahoe.introducer", level=log.UNUSUAL, umid="jfGMXQ") - def remote_subscribe(self, subscriber, service_name): - self.log("introducer: subscription[%s] request at %s" % (service_name, - subscriber)) + def _attach_stub_client(self, ann, lp): + # There might be a v1 subscriber for whom this is a stub_client. + # We might have received the subscription before the stub_client + # announcement, in which case we now need to fix up the record in + # self._subscriptions . + + # record it for later, in case the stub_client arrived before the + # subscription + subscriber_info = self._get_subscriber_info_from_ann(ann) + ann_tubid = get_tubid_string_from_ann(ann) + self._stub_client_announcements[ann_tubid] = subscriber_info + + lp2 = self.log("stub_client announcement, " + "looking for matching subscriber", + parent=lp, level=log.NOISY, umid="BTywDg") + + for sn in self._subscribers: + s = self._subscribers[sn] + for (subscriber, info) in s.items(): + # we correlate these by looking for a subscriber whose tubid + # matches this announcement + sub_tubid = subscriber.getRemoteTubID() + if sub_tubid == ann_tubid: + self.log(format="found a match, nodeid=%(nodeid)s", + nodeid=sub_tubid, + level=log.NOISY, parent=lp2, umid="xsWs1A") + # found a match. Does it need info? 
+ if not info[0]: + self.log(format="replacing info", + level=log.NOISY, parent=lp2, umid="m5kxwA") + # yup + s[subscriber] = (subscriber_info, info[1]) + # and we don't remember or announce stub_clients beyond what we + # need to get the subscriber_info set up + + def _get_subscriber_info_from_ann(self, ann): # for_v1 + sinfo = { "version": ann["version"], + "nickname": ann["nickname"], + "app-versions": ann["app-versions"], + "my-version": ann["my-version"], + "oldest-supported": ann["oldest-supported"], + } + return sinfo + + def remote_subscribe(self, subscriber, service_name): # for_v1 + self.log("introducer: old (v1) subscription[%s] request at %s" + % (service_name, subscriber), umid="hJlGUg") + return self.add_subscriber(WrapV1SubscriberInV2Interface(subscriber), + service_name, None) + + def remote_subscribe_v2(self, subscriber, service_name, subscriber_info): + self.log("introducer: subscription[%s] request at %s" + % (service_name, subscriber), umid="U3uzLg") + return self.add_subscriber(subscriber, service_name, subscriber_info) + + def add_subscriber(self, subscriber, service_name, subscriber_info): self._debug_counts["inbound_subscribe"] += 1 if service_name not in self._subscribers: self._subscribers[service_name] = {} subscribers = self._subscribers[service_name] if subscriber in subscribers: self.log("but they're already subscribed, ignoring", - level=log.UNUSUAL) + level=log.UNUSUAL, umid="Sy9EfA") return - subscribers[subscriber] = time.time() + + if not subscriber_info: # for_v1 + # v1 clients don't provide subscriber_info, but they should + # publish a 'stub client' record which contains the same + # information. If we've already received this, it will be in + # self._stub_client_announcements + tubid = subscriber.getRemoteTubID() + if tubid in self._stub_client_announcements: + subscriber_info = self._stub_client_announcements[tubid] + + subscribers[subscriber] = (subscriber_info, time.time()) def _remove(): self.log("introducer: unsubscribing[%s] %s" % (service_name, - subscriber)) + subscriber), + umid="vYGcJg") subscribers.pop(subscriber, None) subscriber.notifyOnDisconnect(_remove) - announcements = set( - [ ann - for (sn2,nodeid),(ann,when) in self._announcements.items() - if sn2 == service_name] ) - - self._debug_counts["outbound_message"] += 1 - self._debug_counts["outbound_announcements"] += len(announcements) - d = subscriber.callRemote("announce", announcements) - d.addErrback(rrefutil.trap_deadref) - d.addErrback(log.err, - format="subscriber errored during subscribe %(anns)s", - anns=announcements, facility="tahoe.introducer", - level=log.UNUSUAL, umid="mtZepQ") + # now tell them about any announcements they're interested in + announcements = set( [ ann_t + for idx,(ann_t,canary,ann,when) + in self._announcements.items() + if idx[0] == service_name] ) + if announcements: + self._debug_counts["outbound_message"] += 1 + self._debug_counts["outbound_announcements"] += len(announcements) + self._debug_outstanding += 1 + d = subscriber.callRemote("announce_v2", announcements) + d.addBoth(self._debug_retired) + d.addErrback(log.err, + format="subscriber errored during subscribe %(anns)s", + anns=announcements, facility="tahoe.introducer", + level=log.UNUSUAL, umid="mtZepQ") + return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/monitor.py tahoe-lafs-1.10.0/src/allmydata/monitor.py --- tahoe-lafs-1.9.2/src/allmydata/monitor.py 2012-05-14 02:07:22.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/monitor.py 2013-09-03 15:38:27.000000000 +0000 @@ -27,27 +27,27 @@ # 
the following methods are provided for the operation code - def is_cancelled(self): + def is_cancelled(): """Returns True if the operation has been cancelled. If True, operation code should stop creating new work, and attempt to stop any work already in progress.""" - def raise_if_cancelled(self): + def raise_if_cancelled(): """Raise OperationCancelledError if the operation has been cancelled. Operation code that has a robust error-handling path can simply call this periodically.""" - def set_status(self, status): + def set_status(status): """Sets the Monitor's 'status' object to an arbitrary value. Different operations will store different sorts of status information here. Operation code should use get+modify+set sequences to update this.""" - def get_status(self): + def get_status(): """Return the status object. If the operation failed, this will be a Failure instance.""" - def finish(self, status): + def finish(status): """Call this when the operation is done, successful or not. The Monitor's lifetime is influenced by the completion of the operation it is monitoring. The Monitor's 'status' value will be set with the @@ -60,24 +60,26 @@ # the following methods are provided for the initiator of the operation - def is_finished(self): + def is_finished(): """Return a boolean, True if the operation is done (whether successful or failed), False if it is still running.""" - def when_done(self): + def when_done(): """Return a Deferred that fires when the operation is complete. It will fire with the operation status, the same value as returned by get_status().""" - def cancel(self): + def cancel(): """Cancel the operation as soon as possible. is_cancelled() will start returning True after this is called.""" # get_status() is useful too, but it is operation-specific + class OperationCancelledError(Exception): pass + class Monitor: implements(IMonitor) diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/checker.py tahoe-lafs-1.10.0/src/allmydata/mutable/checker.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/checker.py 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/checker.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,6 @@ from allmydata.uri import from_string -from allmydata.util import base32, log +from allmydata.util import base32, log, dictutil from allmydata.check_results import CheckAndRepairResults, CheckResults from allmydata.mutable.common import MODE_CHECK, MODE_WRITE, CorruptShareError @@ -17,7 +17,6 @@ self._monitor = monitor self.bad_shares = [] # list of (server,shnum,failure) self._storage_index = self._node.get_storage_index() - self.results = CheckResults(from_string(node.get_uri()), self._storage_index) self.need_repair = False self.responded = set() # set of (binary) nodeids @@ -36,8 +35,7 @@ if verify: d.addCallback(self._verify_all_shares) d.addCallback(lambda res: servermap) - d.addCallback(self._fill_checker_results, self.results) - d.addCallback(lambda res: self.results) + d.addCallback(self._make_checker_results) return d def _got_mapupdate_results(self, servermap): @@ -122,18 +120,14 @@ return counters - def _fill_checker_results(self, smap, r): + def _make_checker_results(self, smap): self._monitor.raise_if_cancelled() - r.set_servermap(smap.copy()) healthy = True - data = {} report = [] summary = [] vmap = smap.make_versionmap() recoverable = smap.recoverable_versions() unrecoverable = smap.unrecoverable_versions() - data["count-recoverable-versions"] = len(recoverable) - data["count-unrecoverable-versions"] = len(unrecoverable) if 
recoverable: report.append("Recoverable Versions: " + @@ -164,7 +158,6 @@ report.append("Best Recoverable Version: " + smap.summarize_version(best_version)) counters = self._count_shares(smap, best_version) - data.update(counters) s = counters["count-shares-good"] k = counters["count-shares-needed"] N = counters["count-shares-expected"] @@ -180,66 +173,78 @@ # find a k and N from somewhere first = list(unrecoverable)[0] # not exactly the best version, but that doesn't matter too much - data.update(self._count_shares(smap, first)) + counters = self._count_shares(smap, first) # leave needs_rebalancing=False: the file being unrecoverable is # the bigger problem else: # couldn't find anything at all - data["count-shares-good"] = 0 - data["count-shares-needed"] = 3 # arbitrary defaults - data["count-shares-expected"] = 10 - data["count-good-share-hosts"] = 0 - data["count-wrong-shares"] = 0 + counters = { + "count-shares-good": 0, + "count-shares-needed": 3, # arbitrary defaults + "count-shares-expected": 10, + "count-good-share-hosts": 0, + "count-wrong-shares": 0, + } + corrupt_share_locators = [] + problems = [] if self.bad_shares: - data["count-corrupt-shares"] = len(self.bad_shares) - data["list-corrupt-shares"] = locators = [] report.append("Corrupt Shares:") summary.append("Corrupt Shares:") - for (server, shnum, f) in sorted(self.bad_shares): - serverid = server.get_serverid() - locators.append( (serverid, self._storage_index, shnum) ) - s = "%s-sh%d" % (server.get_name(), shnum) - if f.check(CorruptShareError): - ft = f.value.reason - else: - ft = str(f) - report.append(" %s: %s" % (s, ft)) - summary.append(s) - p = (serverid, self._storage_index, shnum, f) - r.problems.append(p) - msg = ("CorruptShareError during mutable verify, " - "serverid=%(serverid)s, si=%(si)s, shnum=%(shnum)d, " - "where=%(where)s") - log.msg(format=msg, serverid=server.get_name(), - si=base32.b2a(self._storage_index), - shnum=shnum, - where=ft, - level=log.WEIRD, umid="EkK8QA") - else: - data["count-corrupt-shares"] = 0 - data["list-corrupt-shares"] = [] + for (server, shnum, f) in sorted(self.bad_shares): + serverid = server.get_serverid() + locator = (server, self._storage_index, shnum) + corrupt_share_locators.append(locator) + s = "%s-sh%d" % (server.get_name(), shnum) + if f.check(CorruptShareError): + ft = f.value.reason + else: + ft = str(f) + report.append(" %s: %s" % (s, ft)) + summary.append(s) + p = (serverid, self._storage_index, shnum, f) + problems.append(p) + msg = ("CorruptShareError during mutable verify, " + "serverid=%(serverid)s, si=%(si)s, shnum=%(shnum)d, " + "where=%(where)s") + log.msg(format=msg, serverid=server.get_name(), + si=base32.b2a(self._storage_index), + shnum=shnum, + where=ft, + level=log.WEIRD, umid="EkK8QA") - sharemap = {} + sharemap = dictutil.DictOfSets() for verinfo in vmap: for (shnum, server, timestamp) in vmap[verinfo]: shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum) - if shareid not in sharemap: - sharemap[shareid] = [] - sharemap[shareid].append(server.get_serverid()) - data["sharemap"] = sharemap - data["servers-responding"] = [s.get_serverid() for s in - list(smap.get_reachable_servers())] - - r.set_healthy(healthy) - r.set_recoverable(bool(recoverable)) - r.set_needs_rebalancing(needs_rebalancing) - r.set_data(data) + sharemap.add(shareid, server) if healthy: - r.set_summary("Healthy") + summary = "Healthy" else: - r.set_summary("Unhealthy: " + " ".join(summary)) - r.set_report(report) + summary = "Unhealthy: " + " ".join(summary) + + cr = 
CheckResults(from_string(self._node.get_uri()), + self._storage_index, + healthy=healthy, recoverable=bool(recoverable), + needs_rebalancing=needs_rebalancing, + count_shares_needed=counters["count-shares-needed"], + count_shares_expected=counters["count-shares-expected"], + count_shares_good=counters["count-shares-good"], + count_good_share_hosts=counters["count-good-share-hosts"], + count_recoverable_versions=len(recoverable), + count_unrecoverable_versions=len(unrecoverable), + servers_responding=list(smap.get_reachable_servers()), + sharemap=sharemap, + count_wrong_shares=counters["count-wrong-shares"], + list_corrupt_shares=corrupt_share_locators, + count_corrupt_shares=len(corrupt_share_locators), + list_incompatible_shares=[], + count_incompatible_shares=0, + summary=summary, + report=report, + share_problems=problems, + servermap=smap.copy()) + return cr class MutableCheckAndRepairer(MutableChecker): @@ -248,38 +253,42 @@ def __init__(self, node, storage_broker, history, monitor): MutableChecker.__init__(self, node, storage_broker, history, monitor) self.cr_results = CheckAndRepairResults(self._storage_index) - self.cr_results.pre_repair_results = self.results self.need_repair = False def check(self, verify=False, add_lease=False): d = MutableChecker.check(self, verify, add_lease) + d.addCallback(self._stash_pre_repair_results) d.addCallback(self._maybe_repair) d.addCallback(lambda res: self.cr_results) return d - def _maybe_repair(self, res): + def _stash_pre_repair_results(self, pre_repair_results): + self.cr_results.pre_repair_results = pre_repair_results + return pre_repair_results + + def _maybe_repair(self, pre_repair_results): + crr = self.cr_results self._monitor.raise_if_cancelled() if not self.need_repair: - self.cr_results.post_repair_results = self.results + crr.post_repair_results = pre_repair_results return if self._node.is_readonly(): # ticket #625: we cannot yet repair read-only mutable files - self.cr_results.post_repair_results = self.results - self.cr_results.repair_attempted = False + crr.post_repair_results = pre_repair_results + crr.repair_attempted = False + return + crr.repair_attempted = True + d = self._node.repair(pre_repair_results, monitor=self._monitor) + def _repair_finished(rr): + crr.repair_successful = rr.get_successful() + crr.post_repair_results = self._make_checker_results(rr.servermap) + crr.repair_results = rr # TODO? return - self.cr_results.repair_attempted = True - d = self._node.repair(self.results, monitor=self._monitor) - def _repair_finished(repair_results): - self.cr_results.repair_successful = repair_results.get_successful() - r = CheckResults(from_string(self._node.get_uri()), self._storage_index) - self.cr_results.post_repair_results = r - self._fill_checker_results(repair_results.servermap, r) - self.cr_results.repair_results = repair_results # TODO? def _repair_error(f): # I'm not sure if I want to pass through a failure or not. - self.cr_results.repair_successful = False - self.cr_results.repair_failure = f # TODO? - #self.cr_results.post_repair_results = ?? + crr.repair_successful = False + crr.repair_failure = f # TODO? + #crr.post_repair_results = ?? 
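    # (illustrative aside: the condensed shape of the check-and-repair flow
    #  implemented above; 'node' and the other inputs are hypothetical)
    #
    #   checker = MutableCheckAndRepairer(node, storage_broker, history,
    #                                     monitor)
    #   d = checker.check(verify=True)
    #   def _done(crr):
    #       # crr.pre_repair_results / crr.post_repair_results are
    #       # CheckResults; crr.repair_attempted / crr.repair_successful
    #       # summarize the repair outcome
    #       return crr
    #   d.addCallback(_done)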
return f d.addCallbacks(_repair_finished, _repair_error) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/common.py tahoe-lafs-1.10.0/src/allmydata/mutable/common.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/common.py 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/common.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,4 @@ -from allmydata.util.spans import DataSpans - MODE_CHECK = "MODE_CHECK" # query all peers MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial @@ -59,56 +57,3 @@ class UnknownVersionError(BadShareError): """The share we received was of a version we don't recognize.""" - -class ResponseCache: - """I cache share data, to reduce the number of round trips used during - mutable file operations. All of the data in my cache is for a single - storage index, but I will keep information on multiple shares for - that storage index. - - I maintain a highest-seen sequence number, and will flush all entries - each time this number increases (this doesn't necessarily imply that - all entries have the same sequence number). - - My cache is indexed by a (verinfo, shnum) tuple. - - My cache entries are DataSpans instances, each representing a set of - non-overlapping byteranges. - """ - - def __init__(self): - self.cache = {} - self.seqnum = None - - def _clear(self): - # also used by unit tests - self.cache = {} - - def add(self, verinfo, shnum, offset, data): - seqnum = verinfo[0] - if seqnum > self.seqnum: - self._clear() - self.seqnum = seqnum - - index = (verinfo, shnum) - if index in self.cache: - self.cache[index].add(offset, data) - else: - spans = DataSpans() - spans.add(offset, data) - self.cache[index] = spans - - def read(self, verinfo, shnum, offset, length): - """Try to satisfy a read request from cache. - Returns data, or None if the cache did not hold the entire requested span. - """ - - # TODO: perhaps return a DataSpans object representing the fragments - # that we have, instead of only returning a hit if we can satisfy the - # whole request from cache. - - index = (verinfo, shnum) - if index in self.cache: - return self.cache[index].get(offset, length) - else: - return None diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/filenode.py tahoe-lafs-1.10.0/src/allmydata/mutable/filenode.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/filenode.py 2012-07-03 16:32:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/filenode.py 2013-09-03 15:38:27.000000000 +0000 @@ -17,7 +17,7 @@ from allmydata.mutable.publish import Publish, MutableData,\ TransformingUploadable from allmydata.mutable.common import MODE_READ, MODE_WRITE, MODE_CHECK, UnrecoverableFileError, \ - ResponseCache, UncoordinatedWriteError + UncoordinatedWriteError from allmydata.mutable.servermap import ServerMap, ServermapUpdater from allmydata.mutable.retrieve import Retrieve from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer @@ -65,7 +65,6 @@ self._required_shares = default_encoding_parameters["k"] self._total_shares = default_encoding_parameters["n"] self._sharemap = {} # known shares, shnum-to-[nodeids] - self._cache = ResponseCache() self._most_recent_size = None # filled in after __init__ if we're being created for the first time; # filled in by the servermap updater before publishing, otherwise. 
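Related to the cache removal above: the layout.py changes below let callers seed a read proxy with locally known share data and, when that data is known to be complete, satisfy every request from it. A sketch of the resulting rule, with simplified names:

    def sketch_read(local_data, data_is_everything, readvs, fetch_remote):
        # mirrors MDMFSlotReadProxy's logic below: serve from local data when
        # every (offset, length) request fits inside it, or whenever the
        # caller declared the local copy complete; else ask the server
        fits = all(off + length <= len(local_data)
                   for (off, length) in readvs)
        if fits or data_is_everything:
            return [local_data[off:off+length] for (off, length) in readvs]
        return fetch_remote(readvs)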
@@ -180,10 +179,6 @@ self._privkey = privkey def _populate_encprivkey(self, encprivkey): self._encprivkey = encprivkey - def _add_to_cache(self, verinfo, shnum, offset, data): - self._cache.add(verinfo, shnum, offset, data) - def _read_from_cache(self, verinfo, shnum, offset, length): - return self._cache.read(verinfo, shnum, offset, length) def get_write_enabler(self, server): seed = server.get_foolscap_write_enabler_seed() diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/layout.py tahoe-lafs-1.10.0/src/allmydata/mutable/layout.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/layout.py 2012-07-03 16:49:20.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/layout.py 2013-09-03 15:38:27.000000000 +0000 @@ -20,7 +20,7 @@ # Q: The sequence number; this is sort of like a revision history for # mutable files; they start at 1 and increase as they are changed after # being uploaded. Stored as an unsigned 64-bit integer. -# 32s: The root hash of the share hash tree. We use sha-256d, so we use 32 +# 32s: The root hash of the share hash tree. We use sha-256d, so we use 32 # bytes to store the value. # 16s: The salt for the readkey. This is a 16-byte random value. # @@ -46,7 +46,7 @@ # to account for the possibility of a lot of share data. # Q: The offset of the EOF. An unsigned 64-bit integer, to account for # the possibility of a lot of share data. -# +# # After all of these, we have the following: # - The verification key: Occupies the space between the end of the header # and the start of the signature (i.e.: data[HEADER_LENGTH:o['signature']]. @@ -57,7 +57,7 @@ # - The share data, which goes from the share data offset to the encrypted # private key offset. # - The encrypted private key offset, which goes until the end of the file. -# +# # The block hash tree in this encoding has only one share, so the offset of # the share data will be 32 bits more than the offset of the block hash tree. # Given this, we may need to check to see how many bytes a reasonably sized @@ -248,7 +248,7 @@ self._segment_size = segment_size self._data_length = data_length - # This is an SDMF file, so it should have only one segment, so, + # This is an SDMF file, so it should have only one segment, so, # modulo padding of the data length, the segment size and the # data length should be the same. expected_segment_size = mathutil.next_multiple(data_length, @@ -610,12 +610,12 @@ # in meaning to what we have with SDMF files, except now instead of # using the literal salt, we use a value derived from all of the # salts -- the share hash root. - # + # # The salt is stored before the block for each segment. The block # hash tree is computed over the combination of block and salt for # each segment. In this way, we get integrity checking for both # block and salt with the current block hash tree arrangement. - # + # # The ordering of the offsets is different to reflect the dependencies # that we'll run into with an MDMF file. The expected write flow is # something like this: @@ -625,16 +625,16 @@ # and where they should go.. We can also figure out where the # encrypted private key should go, because we can figure out how # big the share data will be. - # + # # 1: Encrypt, encode, and upload the file in chunks. Do something - # like + # like # # put_block(data, segnum, salt) # # to write a block and a salt to the disk. We can do both of # these operations now because we have enough of the offsets to # know where to put them. - # + # # 2: Put the encrypted private key. 
Use: # # put_encprivkey(encprivkey) @@ -644,7 +644,7 @@ # # 3: We're now in a position to upload the block hash tree for # a share. Put that using something like: - # + # # put_blockhashes(block_hash_tree) # # Note that block_hash_tree is a list of hashes -- we'll take @@ -655,20 +655,20 @@ # # 4: We're now in a position to upload the share hash chain for # a share. Do that with something like: - # - # put_sharehashes(share_hash_chain) # - # share_hash_chain should be a dictionary mapping shnums to + # put_sharehashes(share_hash_chain) + # + # share_hash_chain should be a dictionary mapping shnums to # 32-byte hashes -- the wrapper handles serialization. # We'll know where to put the signature at this point, also. # The root of this tree will be put explicitly in the next # step. - # + # # 5: Before putting the signature, we must first put the # root_hash. Do this with: - # + # # put_root_hash(root_hash). - # + # # In terms of knowing where to put this value, it was always # possible to place it, but it makes sense semantically to # place it after the share hash tree, so that's why you do it @@ -679,27 +679,27 @@ # get_signable() # # to get the part of the header that you want to sign, and use: - # + # # put_signature(signature) # # to write your signature to the remote server. # # 6: Add the verification key, and finish. Do: # - # put_verification_key(key) + # put_verification_key(key) # - # and + # and # # finish_publish() # # Checkstring management: - # + # # To write to a mutable slot, we have to provide test vectors to ensure # that we are writing to the same data that we think we are. These # vectors allow us to detect uncoordinated writes; that is, writes # where both we and some other shareholder are writing to the # mutable slot, and to report those back to the parts of the program - # doing the writing. + # doing the writing. # # With SDMF, this was easy -- all of the share data was written in # one go, so it was easy to detect uncoordinated writes, and we only @@ -724,7 +724,7 @@ # - When we write out the salt hash # - When we write out the root of the share hash tree # - # since these values will change the header. It is possible that we + # since these values will change the header. It is possible that we # can just make those be written in one operation to minimize # disruption. def __init__(self, @@ -745,7 +745,7 @@ assert self.shnum >= 0 and self.shnum < total_shares self._total_shares = total_shares # We build up the offset table as we write things. It is the - # last thing we write to the remote server. + # last thing we write to the remote server. self._offsets = {} self._testvs = [] # This is a list of write vectors that will be sent to our @@ -1010,7 +1010,7 @@ Put the root hash (the root of the share hash tree) in the remote slot. """ - # It does not make sense to be able to put the root + # It does not make sense to be able to put the root # hash without first putting the share hashes, since you need # the share hashes to generate the root hash. # @@ -1192,7 +1192,8 @@ rref, storage_index, shnum, - data=""): + data="", + data_is_everything=False): # Start the initialization process. self._rref = rref self._storage_index = storage_index @@ -1223,8 +1224,14 @@ # If the user has chosen to initialize us with some data, we'll # try to satisfy subsequent data requests with that data before - # asking the storage server for it. If + # asking the storage server for it. 
self._data = data + + # If the provided data is known to be complete, then we know there's + # nothing to be gained by querying the server, so we should just + # partially satisfy requests with what we have. + self._data_is_everything = data_is_everything + # The way callers interact with cache in the filenode returns # None if there isn't any cached data, but the way we index the # cached data requires a string, so convert None to "". @@ -1240,7 +1247,7 @@ """ if self._offsets: return defer.succeed(None) - # At this point, we may be either SDMF or MDMF. Fetching 107 + # At this point, we may be either SDMF or MDMF. Fetching 107 # bytes will be enough to get header and offsets for both SDMF and # MDMF, though we'll be left with 4 more bytes than we # need if this ends up being MDMF. This is probably less @@ -1738,7 +1745,8 @@ # TODO: It's entirely possible to tweak this so that it just # fulfills the requests that it can, and not demand that all # requests are satisfiable before running it. - if not unsatisfiable and not force_remote: + + if not unsatisfiable or self._data_is_everything: results = [self._data[offset:offset+length] for (offset, length) in readvs] results = {self.shnum: results} diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/publish.py tahoe-lafs-1.10.0/src/allmydata/mutable/publish.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/publish.py 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/publish.py 2013-09-03 15:38:27.000000000 +0000 @@ -253,7 +253,9 @@ # updating, we ignore damaged and missing shares -- callers must # do a repair to repair and recreate these. self.goal = set(self._servermap.get_known_shares()) - self.writers = {} + + # shnum -> set of IMutableSlotWriter + self.writers = DictOfSets() # SDMF files are updated differently. self._version = MDMF_VERSION @@ -278,7 +280,7 @@ self.segment_size, self.datalength) - self.writers.setdefault(shnum, []).append(writer) + self.writers.add(shnum, writer) writer.server = server known_shares = self._servermap.get_known_shares() assert (server, shnum) in known_shares @@ -294,7 +296,7 @@ # after we are done writing share data and have started to write # blocks. In the meantime, we need to know what to look for when # writing, so that we can detect UncoordinatedWriteErrors. - self._checkstring = self.writers.values()[0][0].get_checkstring() + self._checkstring = self._get_some_writer().get_checkstring() # Now, we start pushing shares. self._status.timings["setup"] = time.time() - self._started @@ -330,7 +332,7 @@ # These are filled in later, after we've modified the block hash # tree suitably. self.sharehash_leaves = None # eventually [sharehashes] - self.sharehashes = {} # shnum -> [sharehash leaves necessary to + self.sharehashes = {} # shnum -> [sharehash leaves necessary to # validate the share] self.log("Starting push") @@ -452,7 +454,10 @@ # TODO: Make this part do server selection. self.update_goal() - self.writers = {} + + # shnum -> set of IMutableSlotWriter + self.writers = DictOfSets() + if self._version == MDMF_VERSION: writer_class = MDMFSlotWriteProxy else: @@ -476,7 +481,7 @@ self.total_shares, self.segment_size, self.datalength) - self.writers.setdefault(shnum, []).append(writer) + self.writers.add(shnum, writer) writer.server = server known_shares = self._servermap.get_known_shares() if (server, shnum) in known_shares: @@ -495,7 +500,7 @@ # after we are done writing share data and have started to write # blocks. 
In the meantime, we need to know what to look for when # writing, so that we can detect UncoordinatedWriteErrors. - self._checkstring = self.writers.values()[0][0].get_checkstring() + self._checkstring = self._get_some_writer().get_checkstring() # Now, we start pushing shares. self._status.timings["setup"] = time.time() - self._started @@ -511,7 +516,7 @@ for j in xrange(self.num_segments): blocks.append(None) self.sharehash_leaves = None # eventually [sharehashes] - self.sharehashes = {} # shnum -> [sharehash leaves necessary to + self.sharehashes = {} # shnum -> [sharehash leaves necessary to # validate the share] self.log("Starting push") @@ -521,6 +526,8 @@ return self.done_deferred + def _get_some_writer(self): + return list(self.writers.values()[0])[0] def _update_status(self): self._status.set_status("Sending Shares: %d placed out of %d, " @@ -622,9 +629,8 @@ # Can we still successfully publish this file? # TODO: Keep track of outstanding queries before aborting the # process. - all_shnums = filter(lambda sh: len(self.writers[sh]) > 0, - self.writers.iterkeys()) - if len(all_shnums) < self.required_shares or self.surprised: + num_shnums = len(self.writers) + if num_shnums < self.required_shares or self.surprised: return self._failure() # Figure out what we need to do next. Each of these needs to @@ -835,7 +841,7 @@ uncoordinated writes. SDMF files will have the same checkstring, so we need not do anything. """ - self._checkstring = self.writers.values()[0][0].get_checkstring() + self._checkstring = self._get_some_writer().get_checkstring() def _make_and_place_signature(self): @@ -844,7 +850,7 @@ """ started = time.time() self._status.set_status("Signing prefix") - signable = self.writers.values()[0][0].get_signable() + signable = self._get_some_writer().get_signable() self.signature = self._privkey.sign(signable) for (shnum, writers) in self.writers.iteritems(): @@ -881,7 +887,7 @@ def _record_verinfo(self): - self.versioninfo = self.writers.values()[0][0].get_verinfo() + self.versioninfo = self._get_some_writer().get_verinfo() def _connection_problem(self, f, writer): @@ -891,7 +897,7 @@ """ self.log("found problem: %s" % str(f)) self._last_failure = f - self.writers[writer.shnum].remove(writer) + self.writers.discard(writer.shnum, writer) def log_goal(self, goal, message=""): @@ -1224,7 +1230,7 @@ old_position = self._filehandle.tell() # Seek to the end of the file by seeking 0 bytes from the # file's end - self._filehandle.seek(0, 2) # 2 == os.SEEK_END in 2.5+ + self._filehandle.seek(0, os.SEEK_END) self._size = self._filehandle.tell() # Restore the previous position, in case this was called # after a read. @@ -1318,7 +1324,7 @@ def read(self, length): - # We can get data from 3 sources here. + # We can get data from 3 sources here. # 1. The first of the segments provided to us. # 2. The data that we're replacing things with. # 3. The last of the segments provided to us. 
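The publish.py changes above swap the plain shnum-to-list-of-writers dict for a DictOfSets, with _get_some_writer() replacing the repeated self.writers.values()[0][0] idiom. For reference, this is a simplified sketch of the helper's behavior (the real class is allmydata.util.dictutil.DictOfSets; only the two operations the publisher relies on are shown):

    class DictOfSets(dict):
        """Sketch of a dict whose values are sets and which never keeps an
        empty set around."""
        def add(self, key, value):
            if key in self:
                self[key].add(value)
            else:
                self[key] = set([value])

        def discard(self, key, value):
            if key not in self:
                return
            self[key].discard(value)
            if not self[key]:
                del self[key]  # drop the key once its last value is removed

Assuming discard() drops emptied keys as sketched, a shnum whose last writer hit a connection problem vanishes from self.writers entirely, which is why the old filter over shnums with non-empty writer lists collapses to the plain num_shnums = len(self.writers) test in the hunk above.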
diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/repairer.py tahoe-lafs-1.10.0/src/allmydata/mutable/repairer.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/repairer.py 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/repairer.py 2013-09-03 15:38:27.000000000 +0000 @@ -28,7 +28,7 @@ def __init__(self, node, check_results, storage_broker, history, monitor): self.node = node self.check_results = ICheckResults(check_results) - assert check_results.storage_index == self.node.get_storage_index() + assert check_results.get_storage_index() == node.get_storage_index() self._storage_broker = storage_broker self._history = history self._monitor = monitor diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/retrieve.py tahoe-lafs-1.10.0/src/allmydata/mutable/retrieve.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/retrieve.py 2012-07-03 16:32:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/retrieve.py 2013-09-03 15:38:27.000000000 +0000 @@ -130,7 +130,7 @@ # verify means that we are using the downloader logic to verify all # of our shares. This tells the downloader a few things. - # + # # 1. We need to download all of the shares. # 2. We don't need to decode or decrypt the shares, since our # caller doesn't care about the plaintext, only the @@ -286,16 +286,14 @@ self.remaining_sharemap = DictOfSets() for (shnum, server, timestamp) in shares: self.remaining_sharemap.add(shnum, server) - # If the servermap update fetched anything, it fetched at least 1 - # KiB, so we ask for that much. - # TODO: Change the cache methods to allow us to fetch all of the - # data that they have, then change this method to do that. - any_cache = self._node._read_from_cache(self.verinfo, shnum, - 0, 1000) - reader = MDMFSlotReadProxy(server.get_rref(), - self._storage_index, - shnum, - any_cache) + # Reuse the SlotReader from the servermap. + key = (self.verinfo, server.get_serverid(), + self._storage_index, shnum) + if key in self.servermap.proxies: + reader = self.servermap.proxies[key] + else: + reader = MDMFSlotReadProxy(server.get_rref(), + self._storage_index, shnum, None) reader.server = server self.readers[shnum] = reader assert len(self.remaining_sharemap) >= k @@ -394,7 +392,7 @@ # Our last task is to tell the downloader where to start and # where to stop. We use three parameters for that: # - self._start_segment: the segment that we need to start - # downloading from. + # downloading from. # - self._current_segment: the next segment that we need to # download. # - self._last_segment: The last segment that we were asked to @@ -407,7 +405,7 @@ if self._offset: self.log("got offset: %d" % self._offset) # our start segment is the first segment containing the - # offset we were given. + # offset we were given. start = self._offset // self._segment_size assert start < self._num_segments @@ -766,6 +764,7 @@ block_and_salt, blockhashes, sharehashes = results block, salt = block_and_salt + assert type(block) is str, (block, salt) blockhashes = dict(enumerate(blockhashes)) self.log("the reader gave me the following blockhashes: %s" % \ @@ -799,7 +798,7 @@ # Reaching this point means that we know that this segment # is correct. Now we need to check to see whether the share - # hash chain is also correct. + # hash chain is also correct. # SDMF wrote share hash chains that didn't contain the # leaves, which would be produced from the block hash tree. # So we need to validate the block hash tree first. 
If @@ -838,12 +837,13 @@ #needed.discard(0) self.log("getting blockhashes for segment %d, share %d: %s" % \ (segnum, reader.shnum, str(needed))) - d1 = reader.get_blockhashes(needed, force_remote=True) + # TODO is force_remote necessary here? + d1 = reader.get_blockhashes(needed, force_remote=False) if self.share_hash_tree.needed_hashes(reader.shnum): need = self.share_hash_tree.needed_hashes(reader.shnum) self.log("also need sharehashes for share %d: %s" % (reader.shnum, str(need))) - d2 = reader.get_sharehashes(need, force_remote=True) + d2 = reader.get_sharehashes(need, force_remote=False) else: d2 = defer.succeed({}) # the logic in the next method # expects a dict diff -Nru tahoe-lafs-1.9.2/src/allmydata/mutable/servermap.py tahoe-lafs-1.10.0/src/allmydata/mutable/servermap.py --- tahoe-lafs-1.9.2/src/allmydata/mutable/servermap.py 2012-05-14 02:50:17.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/mutable/servermap.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,5 +1,5 @@ -import sys, time +import sys, time, copy from zope.interface import implements from itertools import count from twisted.internet import defer @@ -119,6 +119,7 @@ self._bad_shares = {} # maps (server,shnum) to old checkstring self._last_update_mode = None self._last_update_time = 0 + self.proxies = {} self.update_data = {} # shnum -> [(verinfo,(blockhashes,start,end)),..] # where blockhashes is a list of bytestrings (the result of # layout.MDMFSlotReadProxy.get_blockhashes), and start/end are both @@ -133,6 +134,7 @@ s._bad_shares = self._bad_shares.copy() # tuple->str s._last_update_mode = self._last_update_mode s._last_update_time = self._last_update_time + s.update_data = copy.deepcopy(self.update_data) return s def get_reachable_servers(self): @@ -631,19 +633,6 @@ self._servermap.add_problem(f) - def _cache_good_sharedata(self, verinfo, shnum, now, data): - """ - If one of my queries returns successfully (which means that we - were able to and successfully did validate the signature), I - cache the data that we initially fetched from the storage - server. This will help reduce the number of roundtrips that need - to occur when the file is downloaded, or when the file is - updated. - """ - if verinfo: - self._node._add_to_cache(verinfo, shnum, 0, data) - - def _got_results(self, datavs, server, readsize, storage_index, started): lp = self.log(format="got result from [%(name)s], %(numshares)d shares", name=server.get_name(), @@ -675,7 +664,9 @@ reader = MDMFSlotReadProxy(ss, storage_index, shnum, - data) + data, + data_is_everything=(len(data) < readsize)) + # our goal, with each response, is to validate the version # information and share data as best we can at this point -- # we do this by validating the signature. To do this, we @@ -747,13 +738,21 @@ d5 = defer.succeed(None) dl = defer.DeferredList([d, d2, d3, d4, d5]) + def _append_proxy(passthrough, shnum=shnum, reader=reader): + # Store the proxy (with its cache) keyed by serverid and + # version. 
+ _, (_,verinfo), _, _, _ = passthrough + verinfo = self._make_verinfo_hashable(verinfo) + self._servermap.proxies[(verinfo, + server.get_serverid(), + storage_index, shnum)] = reader + return passthrough + dl.addCallback(_append_proxy) dl.addBoth(self._turn_barrier) dl.addCallback(lambda results, shnum=shnum: self._got_signature_one_share(results, shnum, server, lp)) dl.addErrback(lambda error, shnum=shnum, data=data: self._got_corrupt_share(error, shnum, server, data, lp)) - dl.addCallback(lambda verinfo, shnum=shnum, data=data: - self._cache_good_sharedata(verinfo, shnum, now, data)) ds.append(dl) # dl is a deferred list that will fire when all of the shares # that we found on this server are done processing. When dl fires, @@ -817,6 +816,10 @@ return None _, verinfo, signature, __, ___ = results + verinfo = self._make_verinfo_hashable(verinfo[1]) + + # This tuple uniquely identifies a share on the grid; we use it + # to keep track of the ones that we've already seen. (seqnum, root_hash, saltish, @@ -825,22 +828,8 @@ k, n, prefix, - offsets) = verinfo[1] - offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] ) + offsets_tuple) = verinfo - # XXX: This should be done for us in the method, so - # presumably you can go in there and fix it. - verinfo = (seqnum, - root_hash, - saltish, - segsize, - datalen, - k, - n, - prefix, - offsets_tuple) - # This tuple uniquely identifies a share on the grid; we use it - # to keep track of the ones that we've already seen. if verinfo not in self._valid_versions: # This is a new version tuple, and we need to validate it @@ -879,13 +868,7 @@ return verinfo - - def _got_update_results_one_share(self, results, share): - """ - I record the update results in results. - """ - assert len(results) == 4 - verinfo, blockhashes, start, end = results + def _make_verinfo_hashable(self, verinfo): (seqnum, root_hash, saltish, @@ -895,10 +878,9 @@ n, prefix, offsets) = verinfo + offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] ) - # XXX: This should be done for us in the method, so - # presumably you can go in there and fix it. verinfo = (seqnum, root_hash, saltish, @@ -908,7 +890,15 @@ n, prefix, offsets_tuple) + return verinfo + def _got_update_results_one_share(self, results, share): + """ + I record the update results in results. + """ + assert len(results) == 4 + verinfo, blockhashes, start, end = results + verinfo = self._make_verinfo_hashable(verinfo) update_data = (blockhashes, start, end) self._servermap.set_update_data_for_share_and_verinfo(share, verinfo, diff -Nru tahoe-lafs-1.9.2/src/allmydata/node.py tahoe-lafs-1.10.0/src/allmydata/node.py --- tahoe-lafs-1.9.2/src/allmydata/node.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/node.py 2013-09-03 15:38:27.000000000 +0000 @@ -209,14 +209,43 @@ # TODO: merge this with allmydata.get_package_versions return dict(app_versions.versions) + def get_config_from_file(self, name, required=False): + """Get the (string) contents of a config file, or None if the file + did not exist. If required=True, raise an exception rather than + returning None. 
Any leading or trailing whitespace will be stripped + from the data.""" + fn = os.path.join(self.basedir, name) + try: + return fileutil.read(fn).strip() + except EnvironmentError: + if not required: + return None + raise + def write_private_config(self, name, value): """Write the (string) contents of a private config file (which is a config file that resides within the subdirectory named 'private'), and - return it. Any leading or trailing whitespace will be stripped from - the data. + return None. + """ + privname = os.path.join(self.basedir, "private", name) + open(privname, "w").write(value) + + def get_private_config(self, name, default=_None): + """Read the (string) contents of a private config file (which is a + config file that resides within the subdirectory named 'private'), + and return it. Return a default, or raise an error if one was not + given. """ privname = os.path.join(self.basedir, "private", name) - open(privname, "w").write(value.strip()) + try: + return fileutil.read(privname) + except EnvironmentError: + if os.path.exists(privname): + raise + if default is _None: + raise MissingConfigEntry("The required configuration file %s is missing." + % (quote_output(privname),)) + return default def get_or_create_private_config(self, name, default=_None): """Try to get the (string) contents of a private config file (which @@ -250,7 +279,7 @@ """Write a string to a config file.""" fn = os.path.join(self.basedir, name) try: - open(fn, mode).write(value) + fileutil.write(fn, value, mode) except EnvironmentError, e: self.log("Unable to write config file '%s'" % fn) self.log(e) @@ -347,7 +376,7 @@ portnum = l.getPortnum() # record which port we're listening on, so we can grab the same one # next time - open(self._portnumfile, "w").write("%d\n" % portnum) + fileutil.write_atomically(self._portnumfile, "%d\n" % portnum, mode="") base_location = ",".join([ "%s:%d" % (addr, portnum) for addr in local_addresses ]) diff -Nru tahoe-lafs-1.9.2/src/allmydata/nodemaker.py tahoe-lafs-1.10.0/src/allmydata/nodemaker.py --- tahoe-lafs-1.9.2/src/allmydata/nodemaker.py 2012-05-14 02:07:23.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/nodemaker.py 2013-09-03 15:38:27.000000000 +0000 @@ -72,12 +72,16 @@ cap = uri.from_string(bigcap, deep_immutable=deep_immutable, name=name) node = self._create_from_single_cap(cap) - if node: - self._node_cache[memokey] = node # note: WeakValueDictionary - else: + + # node is None for an unknown URI, otherwise it is a type for which + # is_mutable() is known. Only mutable nodes are cached; see + # ticket #1679.
+ if node is None: # don't cache UnknownNode node = UnknownNode(writecap, readcap, deep_immutable=deep_immutable, name=name) + elif node.is_mutable(): + self._node_cache[memokey] = node # note: WeakValueDictionary if self.blacklist: si = node.get_storage_index() @@ -139,6 +143,7 @@ packed = pack_children(children, None, deep_immutable=True) uploadable = Data(packed, convergence) d = self.uploader.upload(uploadable) - d.addCallback(lambda results: self.create_from_cap(None, results.uri)) + d.addCallback(lambda results: + self.create_from_cap(None, results.get_uri())) d.addCallback(self._create_dirnode) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/provisioning.py tahoe-lafs-1.10.0/src/allmydata/provisioning.py --- tahoe-lafs-1.9.2/src/allmydata/provisioning.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/provisioning.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,772 +0,0 @@ - -from nevow import inevow, rend, tags as T -import math -from allmydata.util import mathutil -from allmydata.web.common import getxmlfile - -# factorial and binomial copied from -# http://mail.python.org/pipermail/python-list/2007-April/435718.html - -def factorial(n): - """factorial(n): return the factorial of the integer n. - factorial(0) = 1 - factorial(n) with n<0 is -factorial(abs(n)) - """ - result = 1 - for i in xrange(1, abs(n)+1): - result *= i - assert n >= 0 - return result - -def binomial(n, k): - assert 0 <= k <= n - if k == 0 or k == n: - return 1 - # calculate n!/k! as one product, avoiding factors that - # just get canceled - P = k+1 - for i in xrange(k+2, n+1): - P *= i - # if you are paranoid: - # C, rem = divmod(P, factorial(n-k)) - # assert rem == 0 - # return C - return P//factorial(n-k) - -class ProvisioningTool(rend.Page): - addSlash = True - docFactory = getxmlfile("provisioning.xhtml") - - def render_forms(self, ctx, data): - req = inevow.IRequest(ctx) - - def getarg(name, astype=int): - if req.method != "POST": - return None - if name in req.fields: - return astype(req.fields[name].value) - return None - return self.do_forms(getarg) - - - def do_forms(self, getarg): - filled = getarg("filled", bool) - - def get_and_set(name, options, default=None, astype=int): - current_value = getarg(name, astype) - i_select = T.select(name=name) - for (count, description) in options: - count = astype(count) - if ((current_value is not None and count == current_value) or - (current_value is None and count == default)): - o = T.option(value=str(count), selected="true")[description] - else: - o = T.option(value=str(count))[description] - i_select = i_select[o] - if current_value is None: - current_value = default - return current_value, i_select - - sections = {} - def add_input(section, text, entry): - if section not in sections: - sections[section] = [] - sections[section].extend([T.div[text, ": ", entry], "\n"]) - - def add_output(section, entry): - if section not in sections: - sections[section] = [] - sections[section].extend([entry, "\n"]) - - def build_section(section): - return T.fieldset[T.legend[section], sections[section]] - - def number(value, suffix=""): - scaling = 1 - if value < 1: - fmt = "%1.2g%s" - elif value < 100: - fmt = "%.1f%s" - elif value < 1000: - fmt = "%d%s" - elif value < 1e6: - fmt = "%.2fk%s"; scaling = 1e3 - elif value < 1e9: - fmt = "%.2fM%s"; scaling = 1e6 - elif value < 1e12: - fmt = "%.2fG%s"; scaling = 1e9 - elif value < 1e15: - fmt = "%.2fT%s"; scaling = 1e12 - elif value < 1e18: - fmt = "%.2fP%s"; scaling = 1e15 - else: - fmt = "huge! 
%g%s" - return fmt % (value / scaling, suffix) - - user_counts = [(5, "5 users"), - (50, "50 users"), - (200, "200 users"), - (1000, "1k users"), - (10000, "10k users"), - (50000, "50k users"), - (100000, "100k users"), - (500000, "500k users"), - (1000000, "1M users"), - ] - num_users, i_num_users = get_and_set("num_users", user_counts, 50000) - add_input("Users", - "How many users are on this network?", i_num_users) - - files_per_user_counts = [(100, "100 files"), - (1000, "1k files"), - (10000, "10k files"), - (100000, "100k files"), - (1e6, "1M files"), - ] - files_per_user, i_files_per_user = get_and_set("files_per_user", - files_per_user_counts, - 1000) - add_input("Users", - "How many files for each user? (avg)", - i_files_per_user) - - space_per_user_sizes = [(1e6, "1MB"), - (10e6, "10MB"), - (100e6, "100MB"), - (200e6, "200MB"), - (1e9, "1GB"), - (2e9, "2GB"), - (5e9, "5GB"), - (10e9, "10GB"), - (100e9, "100GB"), - (1e12, "1TB"), - (2e12, "2TB"), - (5e12, "5TB"), - ] - # Estimate ~5gb per user as a more realistic case - space_per_user, i_space_per_user = get_and_set("space_per_user", - space_per_user_sizes, - 5e9) - add_input("Users", - "How much data for each user? (avg)", - i_space_per_user) - - sharing_ratios = [(1.0, "1.0x"), - (1.1, "1.1x"), - (2.0, "2.0x"), - ] - sharing_ratio, i_sharing_ratio = get_and_set("sharing_ratio", - sharing_ratios, 1.0, - float) - add_input("Users", - "What is the sharing ratio? (1.0x is no-sharing and" - " no convergence)", i_sharing_ratio) - - # Encoding parameters - encoding_choices = [("3-of-10-5", "3.3x (3-of-10, repair below 5)"), - ("3-of-10-8", "3.3x (3-of-10, repair below 8)"), - ("5-of-10-7", "2x (5-of-10, repair below 7)"), - ("8-of-10-9", "1.25x (8-of-10, repair below 9)"), - ("27-of-30-28", "1.1x (27-of-30, repair below 28"), - ("25-of-100-50", "4x (25-of-100, repair below 50)"), - ] - encoding_parameters, i_encoding_parameters = \ - get_and_set("encoding_parameters", - encoding_choices, "3-of-10-5", str) - encoding_pieces = encoding_parameters.split("-") - k = int(encoding_pieces[0]) - assert encoding_pieces[1] == "of" - n = int(encoding_pieces[2]) - # we repair the file when the number of available shares drops below - # this value - repair_threshold = int(encoding_pieces[3]) - - add_input("Servers", - "What are the default encoding parameters?", - i_encoding_parameters) - - # Server info - num_server_choices = [ (5, "5 servers"), - (10, "10 servers"), - (15, "15 servers"), - (30, "30 servers"), - (50, "50 servers"), - (100, "100 servers"), - (200, "200 servers"), - (300, "300 servers"), - (500, "500 servers"), - (1000, "1k servers"), - (2000, "2k servers"), - (5000, "5k servers"), - (10e3, "10k servers"), - (100e3, "100k servers"), - (1e6, "1M servers"), - ] - num_servers, i_num_servers = \ - get_and_set("num_servers", num_server_choices, 30, int) - add_input("Servers", - "How many servers are there?", i_num_servers) - - # availability is measured in dBA = -dBF, where 0dBF is 100% failure, - # 10dBF is 10% failure, 20dBF is 1% failure, etc - server_dBA_choices = [ (10, "90% [10dBA] (2.4hr/day)"), - (13, "95% [13dBA] (1.2hr/day)"), - (20, "99% [20dBA] (14min/day or 3.5days/year)"), - (23, "99.5% [23dBA] (7min/day or 1.75days/year)"), - (30, "99.9% [30dBA] (87sec/day or 9hours/year)"), - (40, "99.99% [40dBA] (60sec/week or 53min/year)"), - (50, "99.999% [50dBA] (5min per year)"), - ] - server_dBA, i_server_availability = \ - get_and_set("server_availability", - server_dBA_choices, - 20, int) - add_input("Servers", - "What is the 
server availability?", i_server_availability) - - drive_MTBF_choices = [ (40, "40,000 Hours"), - ] - drive_MTBF, i_drive_MTBF = \ - get_and_set("drive_MTBF", drive_MTBF_choices, 40, int) - add_input("Drives", - "What is the hard drive MTBF?", i_drive_MTBF) - # http://www.tgdaily.com/content/view/30990/113/ - # http://labs.google.com/papers/disk_failures.pdf - # google sees: - # 1.7% of the drives they replaced were 0-1 years old - # 8% of the drives they repalced were 1-2 years old - # 8.6% were 2-3 years old - # 6% were 3-4 years old, about 8% were 4-5 years old - - drive_size_choices = [ (100, "100 GB"), - (250, "250 GB"), - (500, "500 GB"), - (750, "750 GB"), - (1000, "1000 GB"), - (2000, "2000 GB"), - (3000, "3000 GB"), - ] - drive_size, i_drive_size = \ - get_and_set("drive_size", drive_size_choices, 3000, int) - drive_size = drive_size * 1e9 - add_input("Drives", - "What is the capacity of each hard drive?", i_drive_size) - drive_failure_model_choices = [ ("E", "Exponential"), - ("U", "Uniform"), - ] - drive_failure_model, i_drive_failure_model = \ - get_and_set("drive_failure_model", - drive_failure_model_choices, - "E", str) - add_input("Drives", - "How should we model drive failures?", i_drive_failure_model) - - # drive_failure_rate is in failures per second - if drive_failure_model == "E": - drive_failure_rate = 1.0 / (drive_MTBF * 1000 * 3600) - else: - drive_failure_rate = 0.5 / (drive_MTBF * 1000 * 3600) - - # deletion/gc/ownership mode - ownership_choices = [ ("A", "no deletion, no gc, no owners"), - ("B", "deletion, no gc, no owners"), - ("C", "deletion, share timers, no owners"), - ("D", "deletion, no gc, yes owners"), - ("E", "deletion, owner timers"), - ] - ownership_mode, i_ownership_mode = \ - get_and_set("ownership_mode", ownership_choices, - "A", str) - add_input("Servers", - "What is the ownership mode?", i_ownership_mode) - - # client access behavior - access_rates = [ (1, "one file per day"), - (10, "10 files per day"), - (100, "100 files per day"), - (1000, "1k files per day"), - (10e3, "10k files per day"), - (100e3, "100k files per day"), - ] - download_files_per_day, i_download_rate = \ - get_and_set("download_rate", access_rates, - 100, int) - add_input("Users", - "How many files are downloaded per day?", i_download_rate) - download_rate = 1.0 * download_files_per_day / (24*60*60) - - upload_files_per_day, i_upload_rate = \ - get_and_set("upload_rate", access_rates, - 10, int) - add_input("Users", - "How many files are uploaded per day?", i_upload_rate) - upload_rate = 1.0 * upload_files_per_day / (24*60*60) - - delete_files_per_day, i_delete_rate = \ - get_and_set("delete_rate", access_rates, - 10, int) - add_input("Users", - "How many files are deleted per day?", i_delete_rate) - delete_rate = 1.0 * delete_files_per_day / (24*60*60) - - - # the value is in days - lease_timers = [ (1, "one refresh per day"), - (7, "one refresh per week"), - ] - lease_timer, i_lease = \ - get_and_set("lease_timer", lease_timers, - 7, int) - add_input("Users", - "How frequently do clients refresh files or accounts? 
" - "(if necessary)", - i_lease) - seconds_per_lease = 24*60*60*lease_timer - - check_timer_choices = [ (1, "every week"), - (4, "every month"), - (8, "every two months"), - (16, "every four months"), - ] - check_timer, i_check_timer = \ - get_and_set("check_timer", check_timer_choices, 4, int) - add_input("Users", - "How frequently should we check on each file?", - i_check_timer) - file_check_interval = check_timer * 7 * 24 * 3600 - - - if filled: - add_output("Users", T.div["Total users: %s" % number(num_users)]) - add_output("Users", - T.div["Files per user: %s" % number(files_per_user)]) - file_size = 1.0 * space_per_user / files_per_user - add_output("Users", - T.div["Average file size: ", number(file_size)]) - total_files = num_users * files_per_user / sharing_ratio - - add_output("Grid", - T.div["Total number of files in grid: ", - number(total_files)]) - total_space = num_users * space_per_user / sharing_ratio - add_output("Grid", - T.div["Total volume of plaintext in grid: ", - number(total_space, "B")]) - - total_shares = n * total_files - add_output("Grid", - T.div["Total shares in grid: ", number(total_shares)]) - expansion = float(n) / float(k) - - total_usage = expansion * total_space - add_output("Grid", - T.div["Share data in grid: ", number(total_usage, "B")]) - - if n > num_servers: - # silly configuration, causes Tahoe2 to wrap and put multiple - # shares on some servers. - add_output("Servers", - T.div["non-ideal: more shares than servers" - " (n=%d, servers=%d)" % (n, num_servers)]) - # every file has at least one share on every server - buckets_per_server = total_files - shares_per_server = total_files * ((1.0 * n) / num_servers) - else: - # if nobody is full, then no lease requests will be turned - # down for lack of space, and no two shares for the same file - # will share a server. Therefore the chance that any given - # file has a share on any given server is n/num_servers. - buckets_per_server = total_files * ((1.0 * n) / num_servers) - # since each such represented file only puts one share on a - # server, the total number of shares per server is the same. - shares_per_server = buckets_per_server - add_output("Servers", - T.div["Buckets per server: ", - number(buckets_per_server)]) - add_output("Servers", - T.div["Shares per server: ", - number(shares_per_server)]) - - # how much space is used on the storage servers for the shares? - # the share data itself - share_data_per_server = total_usage / num_servers - add_output("Servers", - T.div["Share data per server: ", - number(share_data_per_server, "B")]) - # this is determined empirically. H=hashsize=32, for a one-segment - # file and 3-of-10 encoding - share_validation_per_server = 266 * shares_per_server - # this could be 423*buckets_per_server, if we moved the URI - # extension into a separate file, but that would actually consume - # *more* space (minimum filesize is 4KiB), unless we moved all - # shares for a given bucket into a single file. - share_uri_extension_per_server = 423 * shares_per_server - - # ownership mode adds per-bucket data - H = 32 # depends upon the desired security of delete/refresh caps - # bucket_lease_size is the amount of data needed to keep track of - # the delete/refresh caps for each bucket. 
- bucket_lease_size = 0 - client_bucket_refresh_rate = 0 - owner_table_size = 0 - if ownership_mode in ("B", "C", "D", "E"): - bucket_lease_size = sharing_ratio * 1.0 * H - if ownership_mode in ("B", "C"): - # refreshes per second per client - client_bucket_refresh_rate = (1.0 * n * files_per_user / - seconds_per_lease) - add_output("Users", - T.div["Client share refresh rate (outbound): ", - number(client_bucket_refresh_rate, "Hz")]) - server_bucket_refresh_rate = (client_bucket_refresh_rate * - num_users / num_servers) - add_output("Servers", - T.div["Server share refresh rate (inbound): ", - number(server_bucket_refresh_rate, "Hz")]) - if ownership_mode in ("D", "E"): - # each server must maintain a bidirectional mapping from - # buckets to owners. One way to implement this would be to - # put a list of four-byte owner numbers into each bucket, and - # a list of four-byte share numbers into each owner (although - # of course we'd really just throw it into a database and let - # the experts take care of the details). - owner_table_size = 2*(buckets_per_server * sharing_ratio * 4) - - if ownership_mode in ("E",): - # in this mode, clients must refresh one timer per server - client_account_refresh_rate = (1.0 * num_servers / - seconds_per_lease) - add_output("Users", - T.div["Client account refresh rate (outbound): ", - number(client_account_refresh_rate, "Hz")]) - server_account_refresh_rate = (client_account_refresh_rate * - num_users / num_servers) - add_output("Servers", - T.div["Server account refresh rate (inbound): ", - number(server_account_refresh_rate, "Hz")]) - - # TODO: buckets vs shares here is a bit wonky, but in - # non-wrapping grids it shouldn't matter - share_lease_per_server = bucket_lease_size * buckets_per_server - share_ownertable_per_server = owner_table_size - - share_space_per_server = (share_data_per_server + - share_validation_per_server + - share_uri_extension_per_server + - share_lease_per_server + - share_ownertable_per_server) - add_output("Servers", - T.div["Share space per server: ", - number(share_space_per_server, "B"), - " (data ", - number(share_data_per_server, "B"), - ", validation ", - number(share_validation_per_server, "B"), - ", UEB ", - number(share_uri_extension_per_server, "B"), - ", lease ", - number(share_lease_per_server, "B"), - ", ownertable ", - number(share_ownertable_per_server, "B"), - ")", - ]) - - - # rates - client_download_share_rate = download_rate * k - client_download_byte_rate = download_rate * file_size - add_output("Users", - T.div["download rate: shares = ", - number(client_download_share_rate, "Hz"), - " , bytes = ", - number(client_download_byte_rate, "Bps"), - ]) - total_file_check_rate = 1.0 * total_files / file_check_interval - client_check_share_rate = total_file_check_rate / num_users - add_output("Users", - T.div["file check rate: shares = ", - number(client_check_share_rate, "Hz"), - " (interval = %s)" % - number(1 / client_check_share_rate, "s"), - ]) - - client_upload_share_rate = upload_rate * n - # TODO: doesn't include overhead - client_upload_byte_rate = upload_rate * file_size * expansion - add_output("Users", - T.div["upload rate: shares = ", - number(client_upload_share_rate, "Hz"), - " , bytes = ", - number(client_upload_byte_rate, "Bps"), - ]) - client_delete_share_rate = delete_rate * n - - server_inbound_share_rate = (client_upload_share_rate * - num_users / num_servers) - server_inbound_byte_rate = (client_upload_byte_rate * - num_users / num_servers) - add_output("Servers", - T.div["upload rate 
(inbound): shares = ", - number(server_inbound_share_rate, "Hz"), - " , bytes = ", - number(server_inbound_byte_rate, "Bps"), - ]) - add_output("Servers", - T.div["share check rate (inbound): ", - number(total_file_check_rate * n / num_servers, - "Hz"), - ]) - - server_share_modify_rate = ((client_upload_share_rate + - client_delete_share_rate) * - num_users / num_servers) - add_output("Servers", - T.div["share modify rate: shares = ", - number(server_share_modify_rate, "Hz"), - ]) - - server_outbound_share_rate = (client_download_share_rate * - num_users / num_servers) - server_outbound_byte_rate = (client_download_byte_rate * - num_users / num_servers) - add_output("Servers", - T.div["download rate (outbound): shares = ", - number(server_outbound_share_rate, "Hz"), - " , bytes = ", - number(server_outbound_byte_rate, "Bps"), - ]) - - - total_share_space = num_servers * share_space_per_server - add_output("Grid", - T.div["Share space consumed: ", - number(total_share_space, "B")]) - add_output("Grid", - T.div[" %% validation: %.2f%%" % - (100.0 * share_validation_per_server / - share_space_per_server)]) - add_output("Grid", - T.div[" %% uri-extension: %.2f%%" % - (100.0 * share_uri_extension_per_server / - share_space_per_server)]) - add_output("Grid", - T.div[" %% lease data: %.2f%%" % - (100.0 * share_lease_per_server / - share_space_per_server)]) - add_output("Grid", - T.div[" %% owner data: %.2f%%" % - (100.0 * share_ownertable_per_server / - share_space_per_server)]) - add_output("Grid", - T.div[" %% share data: %.2f%%" % - (100.0 * share_data_per_server / - share_space_per_server)]) - add_output("Grid", - T.div["file check rate: ", - number(total_file_check_rate, - "Hz")]) - - total_drives = max(mathutil.div_ceil(int(total_share_space), - int(drive_size)), - num_servers) - add_output("Drives", - T.div["Total drives: ", number(total_drives), " drives"]) - drives_per_server = mathutil.div_ceil(total_drives, num_servers) - add_output("Servers", - T.div["Drives per server: ", drives_per_server]) - - # costs - if drive_size == 3000 * 1e9: - add_output("Servers", T.div["3000GB drive: $250 each"]) - drive_cost = 250 - else: - add_output("Servers", - T.div[T.b["unknown cost per drive, assuming $100"]]) - drive_cost = 100 - - if drives_per_server <= 4: - add_output("Servers", T.div["1U box with <= 4 drives: $1500"]) - server_cost = 1500 # typical 1U box - elif drives_per_server <= 12: - add_output("Servers", T.div["2U box with <= 12 drives: $2500"]) - server_cost = 2500 # 2U box - else: - add_output("Servers", - T.div[T.b["Note: too many drives per server, " - "assuming $3000"]]) - server_cost = 3000 - - server_capital_cost = (server_cost + drives_per_server * drive_cost) - total_server_cost = float(num_servers * server_capital_cost) - add_output("Servers", T.div["Capital cost per server: $", - server_capital_cost]) - add_output("Grid", T.div["Capital cost for all servers: $", - number(total_server_cost)]) - # $70/Mbps/mo - # $44/server/mo power+space - server_bandwidth = max(server_inbound_byte_rate, - server_outbound_byte_rate) - server_bandwidth_mbps = mathutil.div_ceil(int(server_bandwidth*8), - int(1e6)) - server_monthly_cost = 70*server_bandwidth_mbps + 44 - add_output("Servers", T.div["Monthly cost per server: $", - server_monthly_cost]) - add_output("Users", T.div["Capital cost per user: $", - number(total_server_cost / num_users)]) - - # reliability - any_drive_failure_rate = total_drives * drive_failure_rate - any_drive_MTBF = 1 // any_drive_failure_rate # in seconds - 
any_drive_MTBF_days = any_drive_MTBF / 86400 - add_output("Drives", - T.div["MTBF (any drive): ", - number(any_drive_MTBF_days), " days"]) - drive_replacement_monthly_cost = (float(drive_cost) - * any_drive_failure_rate - *30*86400) - add_output("Grid", - T.div["Monthly cost of replacing drives: $", - number(drive_replacement_monthly_cost)]) - - total_server_monthly_cost = float(num_servers * server_monthly_cost - + drive_replacement_monthly_cost) - - add_output("Grid", T.div["Monthly cost for all servers: $", - number(total_server_monthly_cost)]) - add_output("Users", - T.div["Monthly cost per user: $", - number(total_server_monthly_cost / num_users)]) - - # availability - file_dBA = self.file_availability(k, n, server_dBA) - user_files_dBA = self.many_files_availability(file_dBA, - files_per_user) - all_files_dBA = self.many_files_availability(file_dBA, total_files) - add_output("Users", - T.div["availability of: ", - "arbitrary file = %d dBA, " % file_dBA, - "all files of user1 = %d dBA, " % user_files_dBA, - "all files in grid = %d dBA" % all_files_dBA, - ], - ) - - time_until_files_lost = (n-k+1) / any_drive_failure_rate - add_output("Grid", - T.div["avg time until files are lost: ", - number(time_until_files_lost, "s"), ", ", - number(time_until_files_lost/86400, " days"), - ]) - - share_data_loss_rate = any_drive_failure_rate * drive_size - add_output("Grid", - T.div["share data loss rate: ", - number(share_data_loss_rate,"Bps")]) - - # the worst-case survival numbers occur when we do a file check - # and the file is just above the threshold for repair (so we - # decide to not repair it). The question is then: what is the - # chance that the file will decay so badly before the next check - # that we can't recover it? The resulting probability is per - # check interval. - # Note that the chances of us getting into this situation are low. 
- P_disk_failure_during_interval = (drive_failure_rate * - file_check_interval) - disk_failure_dBF = 10*math.log10(P_disk_failure_during_interval) - disk_failure_dBA = -disk_failure_dBF - file_survives_dBA = self.file_availability(k, repair_threshold, - disk_failure_dBA) - user_files_survives_dBA = self.many_files_availability( \ - file_survives_dBA, files_per_user) - all_files_survives_dBA = self.many_files_availability( \ - file_survives_dBA, total_files) - add_output("Users", - T.div["survival of: ", - "arbitrary file = %d dBA, " % file_survives_dBA, - "all files of user1 = %d dBA, " % - user_files_survives_dBA, - "all files in grid = %d dBA" % - all_files_survives_dBA, - " (per worst-case check interval)", - ]) - - - - all_sections = [] - all_sections.append(build_section("Users")) - all_sections.append(build_section("Servers")) - all_sections.append(build_section("Drives")) - if "Grid" in sections: - all_sections.append(build_section("Grid")) - - f = T.form(action=".", method="post", enctype="multipart/form-data") - - if filled: - action = "Recompute" - else: - action = "Compute" - - f = f[T.input(type="hidden", name="filled", value="true"), - T.input(type="submit", value=action), - all_sections, - ] - - try: - from allmydata import reliability - # we import this just to test to see if the page is available - _hush_pyflakes = reliability - del _hush_pyflakes - f = [T.div[T.a(href="../reliability")["Reliability Math"]], f] - except ImportError: - pass - - return f - - def file_availability(self, k, n, server_dBA): - """ - The full formula for the availability of a specific file is:: - - 1 - sum([choose(N,i) * p**i * (1-p)**(N-i)] for i in range(k)]) - - Where choose(N,i) = N! / ( i! * (N-i)! ) . Note that each term of - this summation is the probability that there are exactly 'i' servers - available, and what we're doing is adding up the cases where i is too - low. - - This is a nuisance to calculate at all accurately, especially once N - gets large, and when p is close to unity. So we make an engineering - approximation: if (1-p) is very small, then each [i] term is much - larger than the [i-1] term, and the sum is dominated by the i=k-1 - term. This only works for (1-p) < 10%, and when the choose() function - doesn't rise fast enough to compensate. For high-expansion encodings - (3-of-10, 25-of-100), the choose() function is rising at the same - time as the (1-p)**(N-i) term, so that's not an issue. For - low-expansion encodings (7-of-10, 75-of-100) the two values are - moving in opposite directions, so more care must be taken. - - Note that the p**i term has only a minor effect as long as (1-p)*N is - small, and even then the effect is attenuated by the 1-p term. - """ - - assert server_dBA > 9 # >=90% availability to use the approximation - factor = binomial(n, k-1) - factor_dBA = 10 * math.log10(factor) - exponent = n - k + 1 - file_dBA = server_dBA * exponent - factor_dBA - return file_dBA - - def many_files_availability(self, file_dBA, num_files): - """The probability that 'num_files' independent bernoulli trials will - succeed (i.e. we can recover all files in the grid at any given - moment) is p**num_files . Since p is close to unity, we express in p - in dBA instead, so we can get useful precision on q (=1-p), and then - the formula becomes:: - - P_some_files_unavailable = 1 - (1 - q)**num_files - - That (1-q)**n expands with the usual binomial sequence, 1 - nq + - Xq**2 ... + Xq**n . 
We use the same approximation as before, since we - know q is close to zero, and we get to ignore all the terms past -nq. - """ - - many_files_dBA = file_dBA - 10 * math.log10(num_files) - return many_files_dBA diff -Nru tahoe-lafs-1.9.2/src/allmydata/reliability.py tahoe-lafs-1.10.0/src/allmydata/reliability.py --- tahoe-lafs-1.9.2/src/allmydata/reliability.py 2012-05-14 02:07:23.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/reliability.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,251 +0,0 @@ -#! /usr/bin/python - -import math -from allmydata.util import statistics -from numpy import array, matrix, dot - -DAY=24*60*60 -MONTH=31*DAY -YEAR=365*DAY - -class ReliabilityModel: - """Generate a model of system-wide reliability, given several input - parameters. - - This runs a simulation in which time is quantized down to 'delta' seconds - (default is one month): a smaller delta will result in a more accurate - simulation, but will take longer to run. 'report_span' simulated seconds - will be run. - - The encoding parameters are provided as 'k' (minimum number of shares - needed to recover the file) and 'N' (total number of shares generated). - The default parameters are 3-of-10. - - The first step is to build a probability of individual drive loss during - any given delta. This uses a simple exponential model, in which the - average drive lifetime is specified by the 'drive_lifetime' parameter - (default is 8 years). - - The second step is to calculate a 'transition matrix': a table of - probabilities that shows, given A shares at the start of the delta, what - the chances are of having B shares left at the end of the delta. The - current code optimistically assumes all drives are independent. A - subclass could override that assumption. - - An additional 'repair matrix' is created to show what happens when the - Checker/Repairer is run. In the simulation, the Checker will be run every - 'check_period' seconds (default is one month), and the Repairer will be - run if it sees fewer than 'R' shares (default 7). - - The third step is to finally run the simulation. An initial probability - vector is created (with a 100% chance of N shares and a 0% chance of - fewer than N shares), then it is multiplied by the transition matrix for - every delta of time. Each time the Checker is to be run, the repair - matrix is multiplied in, and some additional stats are accumulated - (average number of repairs that occur, average number of shares - regenerated per repair). - - The output is a ReliabilityReport instance, which contains a table that - samples the state of the simulation once each 'report_period' seconds - (defaults to 3 months). Each row of this table will contain the - probability vector for one sample period (chance of having X shares, from - 0 to N, at the end of the period). The report will also contain other - information. 
- - """ - - @classmethod - def run(klass, - drive_lifetime=8*YEAR, - k=3, R=7, N=10, - delta=1*MONTH, - check_period=1*MONTH, - report_period=3*MONTH, - report_span=5*YEAR, - ): - self = klass() - - check_period = check_period-1 - P = self.p_in_period(drive_lifetime, delta) - - decay = self.build_decay_matrix(N, P) - - repair = self.build_repair_matrix(k, N, R) - - #print "DECAY:", decay - #print "OLD-POST-REPAIR:", old_post_repair - #print "NEW-POST-REPAIR:", decay * repair - #print "REPAIR:", repair - #print "DIFF:", (old_post_repair - decay * repair) - - START = array([0]*N + [1]) - DEAD = array([1]*k + [0]*(1+N-k)) - REPAIRp = array([0]*k + [1]*(R-k) + [0]*(1+N-R)) - REPAIR_newshares = array([0]*k + - [N-i for i in range(k, R)] + - [0]*(1+N-R)) - assert REPAIR_newshares.shape[0] == N+1 - #print "START", START - #print "REPAIRp", REPAIRp - #print "REPAIR_newshares", REPAIR_newshares - - unmaintained_state = START - maintained_state = START - last_check = 0 - last_report = 0 - P_repaired_last_check_period = 0.0 - needed_repairs = [] - needed_new_shares = [] - report = ReliabilityReport() - - for t in range(0, report_span+delta, delta): - # the .A[0] turns the one-row matrix back into an array - unmaintained_state = (unmaintained_state * decay).A[0] - maintained_state = (maintained_state * decay).A[0] - if (t-last_check) > check_period: - last_check = t - # we do a check-and-repair this frequently - need_repair = dot(maintained_state, REPAIRp) - - P_repaired_last_check_period = need_repair - new_shares = dot(maintained_state, REPAIR_newshares) - needed_repairs.append(need_repair) - needed_new_shares.append(new_shares) - - maintained_state = (maintained_state * repair).A[0] - - if (t-last_report) > report_period: - last_report = t - P_dead_unmaintained = dot(unmaintained_state, DEAD) - P_dead_maintained = dot(maintained_state, DEAD) - cumulative_number_of_repairs = sum(needed_repairs) - cumulative_number_of_new_shares = sum(needed_new_shares) - report.add_sample(t, unmaintained_state, maintained_state, - P_repaired_last_check_period, - cumulative_number_of_repairs, - cumulative_number_of_new_shares, - P_dead_unmaintained, P_dead_maintained) - - # record one more sample at the end of the run - P_dead_unmaintained = dot(unmaintained_state, DEAD) - P_dead_maintained = dot(maintained_state, DEAD) - cumulative_number_of_repairs = sum(needed_repairs) - cumulative_number_of_new_shares = sum(needed_new_shares) - report.add_sample(t, unmaintained_state, maintained_state, - P_repaired_last_check_period, - cumulative_number_of_repairs, - cumulative_number_of_new_shares, - P_dead_unmaintained, P_dead_maintained) - - #def yandm(seconds): - # return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH)) - #needed_repairs_total = sum(needed_repairs) - #needed_new_shares_total = sum(needed_new_shares) - #print "at 2y:" - #print " unmaintained", unmaintained_state - #print " maintained", maintained_state - #print " number of repairs", needed_repairs_total - #print " new shares generated", needed_new_shares_total - #repair_rate_inv = report_span / needed_repairs_total - #print " avg repair rate: once every %s" % yandm(repair_rate_inv) - #print " avg repair download: one share every %s" % yandm(repair_rate_inv/k) - #print " avg repair upload: one share every %s" % yandm(report_span / needed_new_shares_total) - - return report - - def p_in_period(self, avg_lifetime, period): - """Given an average lifetime of a disk (using an exponential model), - what is the chance that a live disk will survive the next 
'period' - seconds?""" - - # eg p_in_period(8*YEAR, MONTH) = 98.94% - return math.exp(-1.0*period/avg_lifetime) - - def build_decay_matrix(self, N, P): - """Return a decay matrix. decay[start_shares][end_shares] is the - conditional probability of finishing with end_shares, given that we - started with start_shares.""" - decay_rows = [] - decay_rows.append( [0.0]*(N+1) ) - for start_shares in range(1, (N+1)): - end_shares = self.build_decay_row(start_shares, P) - decay_row = end_shares + [0.0] * (N-start_shares) - assert len(decay_row) == (N+1), len(decay_row) - decay_rows.append(decay_row) - - decay = matrix(decay_rows) - return decay - - def build_decay_row(self, start_shares, P): - """Return a decay row 'end_shares'. end_shares[i] is the chance that - we finish with i shares, given that we started with start_shares, for - all i between 0 and start_shares, inclusive. This implementation - assumes that all shares are independent (IID), but a more complex - model could incorporate inter-share failure correlations like having - two shares on the same server.""" - end_shares = statistics.binomial_distribution_pmf(start_shares, P) - return end_shares - - def build_repair_matrix(self, k, N, R): - """Return a repair matrix. repair[start][end]: is the conditional - probability of the repairer finishing with 'end' shares, given that - it began with 'start' shares (repair if fewer than R shares). The - repairer's behavior is deterministic, so all values in this matrix - are either 0 or 1. This matrix should be applied *after* the decay - matrix.""" - new_repair_rows = [] - for start_shares in range(0, N+1): - new_repair_row = [0] * (N+1) - if start_shares < k: - new_repair_row[start_shares] = 1 - elif start_shares < R: - new_repair_row[N] = 1 - else: - new_repair_row[start_shares] = 1 - new_repair_rows.append(new_repair_row) - - repair = matrix(new_repair_rows) - return repair - -class ReliabilityReport: - def __init__(self): - self.samples = [] - - def add_sample(self, when, unmaintained_shareprobs, maintained_shareprobs, - P_repaired_last_check_period, - cumulative_number_of_repairs, - cumulative_number_of_new_shares, - P_dead_unmaintained, P_dead_maintained): - """ - when: the timestamp at the end of the report period - unmaintained_shareprobs: a vector of probabilities, element[S] - is the chance that there are S shares - left at the end of the report period. - This tracks what happens if no repair - is ever done. - maintained_shareprobs: same, but for 'maintained' grids, where - check and repair is done at the end - of each check period - P_repaired_last_check_period: a float, with the probability - that a repair was performed - at the end of the most recent - check period. 
diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/admin.py tahoe-lafs-1.10.0/src/allmydata/scripts/admin.py
--- tahoe-lafs-1.9.2/src/allmydata/scripts/admin.py 1970-01-01 00:00:00.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/scripts/admin.py 2013-09-03 15:38:27.000000000 +0000
@@ -0,0 +1,88 @@
+
+from twisted.python import usage
+from allmydata.scripts.common import BaseOptions
+
+class GenerateKeypairOptions(BaseOptions):
+    def getSynopsis(self):
+        return "Usage: tahoe [global-opts] admin generate-keypair"
+
+    def getUsage(self, width=None):
+        t = BaseOptions.getUsage(self, width)
+        t += """
+Generate a public/private keypair, dumped to stdout as two lines of ASCII.
+
+"""
+        return t
+
+def print_keypair(options):
+    from allmydata.util.keyutil import make_keypair
+    out = options.stdout
+    privkey_vs, pubkey_vs = make_keypair()
+    print >>out, "private:", privkey_vs
+    print >>out, "public:", pubkey_vs
+
+class DerivePubkeyOptions(BaseOptions):
+    def parseArgs(self, privkey):
+        self.privkey = privkey
+
+    def getSynopsis(self):
+        return "Usage: tahoe [global-opts] admin derive-pubkey PRIVKEY"
+
+    def getUsage(self, width=None):
+        t = BaseOptions.getUsage(self, width)
+        t += """
+Given a private (signing) key that was previously generated with
+generate-keypair, derive the public key and print it to stdout.
+
+"""
+        return t
+
+def derive_pubkey(options):
+    out = options.stdout
+    from allmydata.util import keyutil
+    privkey_vs = options.privkey
+    sk, pubkey_vs = keyutil.parse_privkey(privkey_vs)
+    print >>out, "private:", privkey_vs
+    print >>out, "public:", pubkey_vs
+    return 0
+
+class AdminCommand(BaseOptions):
+    subCommands = [
+        ("generate-keypair", None, GenerateKeypairOptions,
+         "Generate a public/private keypair, write to stdout."),
+        ("derive-pubkey", None, DerivePubkeyOptions,
+         "Derive a public key from a private key."),
+        ]
+    def postOptions(self):
+        if not hasattr(self, 'subOptions'):
+            raise usage.UsageError("must specify a subcommand")
+    def getSynopsis(self):
+        return "Usage: tahoe [global-opts] admin SUBCOMMAND"
+    def getUsage(self, width=None):
+        t = BaseOptions.getUsage(self, width)
+        t += """
+Please run e.g. 'tahoe admin generate-keypair --help' for more details on
+each subcommand.
+"""
+        return t
+
+subDispatch = {
+    "generate-keypair": print_keypair,
+    "derive-pubkey": derive_pubkey,
+    }
+
+def do_admin(options):
+    so = options.subOptions
+    so.stdout = options.stdout
+    so.stderr = options.stderr
+    f = subDispatch[options.subCommand]
+    return f(so)
+
+
+subCommands = [
+    ["admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"],
+    ]
+
+dispatch = {
+    "admin": do_admin,
+    }
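NOTE (not part of the patch): the new 'tahoe admin' subcommands above are thin wrappers around allmydata.util.keyutil. A short sketch of the round trip they expose, using only the two functions the code above already calls:

    from allmydata.util.keyutil import make_keypair, parse_privkey

    privkey_vs, pubkey_vs = make_keypair()     # printable ASCII strings, as
                                               # printed by generate-keypair
    sk, derived_pubkey_vs = parse_privkey(privkey_vs)
    assert derived_pubkey_vs == pubkey_vs      # derive-pubkey recovers the
                                               # same public key

The CLI equivalent is to run 'tahoe admin generate-keypair' and then feed the printed private key to 'tahoe admin derive-pubkey PRIVKEY'.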
+""" + return t + +subDispatch = { + "generate-keypair": print_keypair, + "derive-pubkey": derive_pubkey, + } + +def do_admin(options): + so = options.subOptions + so.stdout = options.stdout + so.stderr = options.stderr + f = subDispatch[options.subCommand] + return f(so) + + +subCommands = [ + ["admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"], + ] + +dispatch = { + "admin": do_admin, + } diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/backupdb.py tahoe-lafs-1.10.0/src/allmydata/scripts/backupdb.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/backupdb.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/backupdb.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,11 +1,6 @@ -# the backupdb is only available if sqlite3 is available. Python-2.5.x and -# beyond include sqlite3 in the standard library. For python-2.4, the -# "pysqlite2" "package" (or "module") (which, despite the confusing name, uses -# sqlite3, and which, confusingly, comes in the "pysqlite" "distribution" (or -# "package")) must be installed. On debian, install python-pysqlite2 - import os.path, sys, time, random, stat + from allmydata.util.netstring import netstring from allmydata.util.hashutil import backupdb_dirhash from allmydata.util import base32 @@ -68,19 +63,12 @@ create_version=(SCHEMA_v2, 2), just_create=False): # open or create the given backupdb file. The parent directory must # exist. - try: - import sqlite3 - sqlite = sqlite3 # pyflakes whines about 'import sqlite3 as sqlite' .. - except ImportError: - from pysqlite2 import dbapi2 - sqlite = dbapi2 # .. when this clause does it too - # This import should never fail, because setuptools requires that the - # "pysqlite" distribution is present at start time (if on Python < 2.5). + import sqlite3 must_create = not os.path.exists(dbfile) try: - db = sqlite.connect(dbfile) - except (EnvironmentError, sqlite.OperationalError), e: + db = sqlite3.connect(dbfile) + except (EnvironmentError, sqlite3.OperationalError), e: print >>stderr, "Unable to create/open backupdb file %s: %s" % (dbfile, e) return None @@ -94,7 +82,7 @@ try: c.execute("SELECT version FROM version") version = c.fetchone()[0] - except sqlite.DatabaseError, e: + except sqlite3.DatabaseError, e: # this indicates that the file is not a compatible database format. # Perhaps it was created with an old version, or it might be junk. 
print >>stderr, "backupdb file is unusable: %s" % e @@ -108,7 +96,7 @@ db.commit() version = 2 if version == 2: - return BackupDB_v2(sqlite, db) + return BackupDB_v2(sqlite3, db) print >>stderr, "Unable to handle backupdb version %s" % version return None @@ -263,7 +251,7 @@ c.execute("INSERT INTO caps (filecap) VALUES (?)", (filecap,)) except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError): # sqlite3 on sid gives IntegrityError - # pysqlite2 on dapper gives OperationalError + # pysqlite2 (which we don't use, so maybe no longer relevant) on dapper gives OperationalError pass c.execute("SELECT fileid FROM caps WHERE filecap=?", (filecap,)) foundrow = c.fetchone() diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/cli.py tahoe-lafs-1.10.0/src/allmydata/scripts/cli.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/cli.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/cli.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,31 +1,27 @@ import os.path, re, fnmatch from twisted.python import usage -from allmydata.scripts.common import BaseOptions, get_aliases, get_default_nodedir, DEFAULT_ALIAS +from allmydata.scripts.common import get_aliases, get_default_nodedir, \ + DEFAULT_ALIAS, BaseOptions from allmydata.util.encodingutil import argv_to_unicode, argv_to_abspath, quote_output NODEURL_RE=re.compile("http(s?)://([^:]*)(:([1-9][0-9]*))?") _default_nodedir = get_default_nodedir() -class VDriveOptions(BaseOptions): +class FilesystemOptions(BaseOptions): optParameters = [ - ["node-directory", "d", None, - "Specify which Tahoe node directory should be used. The directory " - "should either contain a full Tahoe node, or a file named node.url " - "that points to some other Tahoe node. It should also contain a file " - "named '" + os.path.join('private', 'aliases') + "' which contains the " - "mapping from alias name to root dirnode URI." + ( - _default_nodedir and (" [default: " + quote_output(_default_nodedir) + "]") or "")], ["node-url", "u", None, - "Specify the URL of the Tahoe gateway node, such as 'http://127.0.0.1:3456'. " + "Specify the URL of the Tahoe gateway node, such as " + "'http://127.0.0.1:3456'. 
" "This overrides the URL found in the --node-directory ."], ["dir-cap", None, None, "Specify which dirnode URI should be used as the 'tahoe' alias."] ] def postOptions(self): - if self['node-directory']: - self['node-directory'] = argv_to_abspath(self['node-directory']) + self["quiet"] = self.parent["quiet"] + if self.parent['node-directory']: + self['node-directory'] = argv_to_abspath(self.parent['node-directory']) else: self['node-directory'] = _default_nodedir @@ -49,7 +45,7 @@ self.aliases = aliases # maps alias name to dircap -class MakeDirectoryOptions(VDriveOptions): +class MakeDirectoryOptions(FilesystemOptions): optParameters = [ ("format", None, None, "Create a directory with the given format: SDMF or MDMF (case-insensitive)"), ] @@ -62,11 +58,11 @@ raise usage.UsageError("%s is an invalid format" % self['format']) def getSynopsis(self): - return "Usage: %s mkdir [options] [REMOTE_DIR]" % (self.command_name,) + return "Usage: %s [global-opts] mkdir [options] [REMOTE_DIR]" % (self.command_name,) longdesc = """Create a new directory, either unlinked or as a subdirectory.""" -class AddAliasOptions(VDriveOptions): +class AddAliasOptions(FilesystemOptions): def parseArgs(self, alias, cap): self.alias = argv_to_unicode(alias) if self.alias.endswith(u':'): @@ -74,28 +70,28 @@ self.cap = cap def getSynopsis(self): - return "Usage: %s add-alias [options] ALIAS[:] DIRCAP" % (self.command_name,) + return "Usage: %s [global-opts] add-alias [options] ALIAS[:] DIRCAP" % (self.command_name,) longdesc = """Add a new alias for an existing directory.""" -class CreateAliasOptions(VDriveOptions): +class CreateAliasOptions(FilesystemOptions): def parseArgs(self, alias): self.alias = argv_to_unicode(alias) if self.alias.endswith(u':'): self.alias = self.alias[:-1] def getSynopsis(self): - return "Usage: %s create-alias [options] ALIAS[:]" % (self.command_name,) + return "Usage: %s [global-opts] create-alias [options] ALIAS[:]" % (self.command_name,) longdesc = """Create a new directory and add an alias for it.""" -class ListAliasesOptions(VDriveOptions): +class ListAliasesOptions(FilesystemOptions): def getSynopsis(self): - return "Usage: %s list-aliases [options]" % (self.command_name,) + return "Usage: %s [global-opts] list-aliases [options]" % (self.command_name,) longdesc = """Display a table of all configured aliases.""" -class ListOptions(VDriveOptions): +class ListOptions(FilesystemOptions): optFlags = [ ("long", "l", "Use long format: show file sizes, and timestamps."), ("uri", "u", "Show file/directory URIs."), @@ -106,9 +102,14 @@ def parseArgs(self, where=""): self.where = argv_to_unicode(where) + def getSynopsis(self): + return "Usage: %s [global-opts] ls [options] [PATH]" % (self.command_name,) + longdesc = """ List the contents of some portion of the grid. + If PATH is omitted, "tahoe:" is assumed. + When the -l or --long option is used, each line is shown in the following format: @@ -132,7 +133,7 @@ last modified. """ -class GetOptions(VDriveOptions): +class GetOptions(FilesystemOptions): def parseArgs(self, arg1, arg2=None): # tahoe get FOO |less # write to stdout # tahoe get tahoe:FOO |less # same @@ -150,7 +151,7 @@ self.to_file = None def getSynopsis(self): - return "Usage: %s get [options] REMOTE_FILE LOCAL_FILE" % (self.command_name,) + return "Usage: %s [global-opts] get [options] REMOTE_FILE LOCAL_FILE" % (self.command_name,) longdesc = """ Retrieve a file from the grid and write it to the local filesystem. 
If @@ -158,7 +159,7 @@ stdout.""" def getUsage(self, width=None): - t = VDriveOptions.getUsage(self, width) + t = FilesystemOptions.getUsage(self, width) t += """ Examples: % tahoe get FOO |less # write to stdout @@ -168,7 +169,7 @@ """ return t -class PutOptions(VDriveOptions): +class PutOptions(FilesystemOptions): optFlags = [ ("mutable", "m", "Create a mutable file instead of an immutable one (like --format=SDMF)"), ] @@ -196,17 +197,21 @@ raise usage.UsageError("%s is an invalid format" % self['format']) def getSynopsis(self): - return "Usage: %s put [options] LOCAL_FILE REMOTE_FILE" % (self.command_name,) + return "Usage: %s [global-opts] put [options] LOCAL_FILE REMOTE_FILE" % (self.command_name,) longdesc = """ Put a file into the grid, copying its contents from the local filesystem. If REMOTE_FILE is missing, upload the file but do not link it into a directory; also print the new filecap to stdout. If LOCAL_FILE is missing or '-', data will be copied from stdin. REMOTE_FILE is assumed to start - with tahoe: unless otherwise specified.""" + with tahoe: unless otherwise specified. + + If the destination file already exists and is mutable, it will be modified + in-place, whether or not --mutable is specified. (--mutable only affects + creation of new files.)""" def getUsage(self, width=None): - t = VDriveOptions.getUsage(self, width) + t = FilesystemOptions.getUsage(self, width) t += """ Examples: % cat FILE | tahoe put # create unlinked file from stdin @@ -219,7 +224,7 @@ """ return t -class CpOptions(VDriveOptions): +class CpOptions(FilesystemOptions): optFlags = [ ("recursive", "r", "Copy source directory recursively."), ("verbose", "v", "Be noisy about what is happening."), @@ -235,7 +240,7 @@ self.destination = argv_to_unicode(args[-1]) def getSynopsis(self): - return "Usage: %s cp [options] FROM.. TO" % (self.command_name,) + return "Usage: %s [global-opts] cp [options] FROM.. TO" % (self.command_name,) longdesc = """ Use 'tahoe cp' to copy files between a local filesystem and a Tahoe grid. @@ -262,24 +267,24 @@ slashes. """ -class UnlinkOptions(VDriveOptions): +class UnlinkOptions(FilesystemOptions): def parseArgs(self, where): self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s unlink [options] REMOTE_FILE" % (self.command_name,) + return "Usage: %s [global-opts] unlink [options] REMOTE_FILE" % (self.command_name,) class RmOptions(UnlinkOptions): def getSynopsis(self): - return "Usage: %s rm [options] REMOTE_FILE" % (self.command_name,) + return "Usage: %s [global-opts] rm [options] REMOTE_FILE" % (self.command_name,) -class MvOptions(VDriveOptions): +class MvOptions(FilesystemOptions): def parseArgs(self, frompath, topath): self.from_file = argv_to_unicode(frompath) self.to_file = argv_to_unicode(topath) def getSynopsis(self): - return "Usage: %s mv [options] FROM TO" % (self.command_name,) + return "Usage: %s [global-opts] mv [options] FROM TO" % (self.command_name,) longdesc = """ Use 'tahoe mv' to move files that are already on the grid elsewhere on @@ -293,13 +298,13 @@ the grid -- use 'tahoe cp' for that. 
""" -class LnOptions(VDriveOptions): +class LnOptions(FilesystemOptions): def parseArgs(self, frompath, topath): self.from_file = argv_to_unicode(frompath) self.to_file = argv_to_unicode(topath) def getSynopsis(self): - return "Usage: %s ln [options] FROM_LINK TO_LINK" % (self.command_name,) + return "Usage: %s [global-opts] ln [options] FROM_LINK TO_LINK" % (self.command_name,) longdesc = """ Use 'tahoe ln' to duplicate a link (directory entry) already on the grid @@ -326,7 +331,7 @@ class BackupConfigurationError(Exception): pass -class BackupOptions(VDriveOptions): +class BackupOptions(FilesystemOptions): optFlags = [ ("verbose", "v", "Be noisy about what is happening."), ("ignore-timestamps", None, "Do not use backupdb timestamps to decide whether a local file is unchanged."), @@ -346,7 +351,7 @@ self.to_dir = argv_to_unicode(topath) def getSynopsis(self): - return "Usage: %s backup [options] FROM ALIAS:TO" % (self.command_name,) + return "Usage: %s [global-opts] backup [options] FROM ALIAS:TO" % (self.command_name,) def opt_exclude(self, pattern): """Ignore files matching a glob pattern. You may give multiple @@ -396,7 +401,7 @@ --link-dest=TO/Archives/(previous) FROM TO/Archives/(new); ln -sf TO/Archives/(new) TO/Latest'.""" -class WebopenOptions(VDriveOptions): +class WebopenOptions(FilesystemOptions): optFlags = [ ("info", "i", "Open the t=info page for the file"), ] @@ -404,13 +409,13 @@ self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s webopen [options] [ALIAS:PATH]" % (self.command_name,) + return "Usage: %s [global-opts] webopen [options] [ALIAS:PATH]" % (self.command_name,) longdesc = """Open a web browser to the contents of some file or directory on the grid. When run without arguments, open the Welcome page.""" -class ManifestOptions(VDriveOptions): +class ManifestOptions(FilesystemOptions): optFlags = [ ("storage-index", "s", "Only print storage index strings, not pathname+cap."), ("verify-cap", None, "Only print verifycap, not pathname+cap."), @@ -421,12 +426,12 @@ self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s manifest [options] [ALIAS:PATH]" % (self.command_name,) + return "Usage: %s [global-opts] manifest [options] [ALIAS:PATH]" % (self.command_name,) longdesc = """Print a list of all files and directories reachable from the given starting point.""" -class StatsOptions(VDriveOptions): +class StatsOptions(FilesystemOptions): optFlags = [ ("raw", "r", "Display raw JSON data instead of parsed"), ] @@ -434,12 +439,12 @@ self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s stats [options] [ALIAS:PATH]" % (self.command_name,) + return "Usage: %s [global-opts] stats [options] [ALIAS:PATH]" % (self.command_name,) longdesc = """Print statistics about of all files and directories reachable from the given starting point.""" -class CheckOptions(VDriveOptions): +class CheckOptions(FilesystemOptions): optFlags = [ ("raw", None, "Display raw JSON data instead of parsed."), ("verify", None, "Verify all hashes, instead of merely querying share presence."), @@ -450,14 +455,14 @@ self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s check [options] [ALIAS:PATH]" % (self.command_name,) + return "Usage: %s [global-opts] check [options] [ALIAS:PATH]" % (self.command_name,) longdesc = """ Check a single file or directory: count how many shares are available and verify their hashes. 
Optionally repair the file if any problems were found.""" -class DeepCheckOptions(VDriveOptions): +class DeepCheckOptions(FilesystemOptions): optFlags = [ ("raw", None, "Display raw JSON data instead of parsed."), ("verify", None, "Verify all hashes, instead of merely querying share presence."), @@ -469,7 +474,7 @@ self.where = argv_to_unicode(where) def getSynopsis(self): - return "Usage: %s deep-check [options] [ALIAS:PATH]" % (self.command_name,) + return "Usage: %s [global-opts] deep-check [options] [ALIAS:PATH]" % (self.command_name,) longdesc = """ Check all files and directories reachable from the given starting point diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/common.py tahoe-lafs-1.10.0/src/allmydata/scripts/common.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/common.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/common.py 2013-09-03 15:38:27.000000000 +0000 @@ -25,58 +25,42 @@ class BaseOptions(usage.Options): - # unit tests can override these to point at StringIO instances - stdin = sys.stdin - stdout = sys.stdout - stderr = sys.stderr - - optFlags = [ - ["quiet", "q", "Operate silently."], - ["version", "V", "Display version numbers."], - ["version-and-path", None, "Display version numbers and paths to their locations."], - ] - optParameters = [ - ["node-directory", "d", None, "Specify which Tahoe node directory should be used." + ( - _default_nodedir and (" [default for most commands: " + quote_output(_default_nodedir) + "]") or "")], - ] - def __init__(self): super(BaseOptions, self).__init__() self.command_name = os.path.basename(sys.argv[0]) if self.command_name == 'trial': self.command_name = 'tahoe' + # Only allow "tahoe --version", not e.g. "tahoe start --version" def opt_version(self): - import allmydata - print >>self.stdout, allmydata.get_package_versions_string(debug=True) - self.no_command_needed = True - - def opt_version_and_path(self): - import allmydata - print >>self.stdout, allmydata.get_package_versions_string(show_paths=True, debug=True) - self.no_command_needed = True + raise usage.UsageError("--version not allowed on subcommands") - -class BasedirMixin: +class BasedirOptions(BaseOptions): default_nodedir = _default_nodedir optParameters = [ - ["basedir", "C", None, "Same as --node-directory."], + ["basedir", "C", None, "Same as --node-directory (default %s)." 
+ % get_default_nodedir()], ] def parseArgs(self, basedir=None): - if self['node-directory'] and self['basedir']: - raise usage.UsageError("The --node-directory (or -d) and --basedir (or -C) " - "options cannot both be used.") + if self.parent['node-directory'] and self['basedir']: + raise usage.UsageError("The --node-directory (or -d) and --basedir (or -C) options cannot both be used.") + if self.parent['node-directory'] and basedir: + raise usage.UsageError("The --node-directory (or -d) option and a basedir argument cannot both be used.") + if self['basedir'] and basedir: + raise usage.UsageError("The --basedir (or -C) option and a basedir argument cannot both be used.") if basedir: b = argv_to_abspath(basedir) elif self['basedir']: b = argv_to_abspath(self['basedir']) - elif self['node-directory']: - b = argv_to_abspath(self['node-directory']) - else: + elif self.parent['node-directory']: + b = argv_to_abspath(self.parent['node-directory']) + elif self.default_nodedir: b = self.default_nodedir + else: + raise usage.UsageError("No default basedir available, you must provide one with --node-directory, --basedir, or a basedir argument") self['basedir'] = b def postOptions(self): @@ -88,7 +72,6 @@ def get_aliases(nodedir): - from allmydata import uri aliases = {} aliasfile = os.path.join(nodedir, "private", "aliases") rootfile = os.path.join(nodedir, "private", "root_dir.cap") @@ -96,7 +79,7 @@ f = open(rootfile, "r") rootcap = f.read().strip() if rootcap: - aliases[DEFAULT_ALIAS] = uri.from_string_dirnode(rootcap).to_string() + aliases[DEFAULT_ALIAS] = rootcap except EnvironmentError: pass try: @@ -108,7 +91,7 @@ name, cap = line.split(u":", 1) # normalize it: remove http: prefix, urldecode cap = cap.strip().encode('utf-8') - aliases[name] = uri.from_string_dirnode(cap).to_string() + aliases[name] = cap except EnvironmentError: pass return aliases @@ -174,7 +157,7 @@ raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. " "To create it, use 'tahoe create-alias %s'." % (quote_output(default), quote_output(default, quotemarks=False))) - return aliases[default], path + return uri.from_string_dirnode(aliases[default]).to_string(), path if colon == 1 and default is None and platform_uses_lettercolon_drivename(): # treat C:\why\must\windows\be\so\weird as a local path, not a tahoe # file in the "C:" alias @@ -191,11 +174,11 @@ raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. " "To create it, use 'tahoe create-alias %s'." % (quote_output(default), quote_output(default, quotemarks=False))) - return aliases[default], path + return uri.from_string_dirnode(aliases[default]).to_string(), path if alias not in aliases: raise UnknownAliasError("Unknown alias %s, please create it with 'tahoe add-alias' or 'tahoe create-alias'." 
% quote_output(alias)) - return aliases[alias], path[colon+1:] + return uri.from_string_dirnode(aliases[alias]).to_string(), path[colon+1:] def escape_path(path): segments = path.split("/") diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/common_http.py tahoe-lafs-1.10.0/src/allmydata/scripts/common_http.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/common_http.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/common_http.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,11 +1,12 @@ +import os from cStringIO import StringIO import urlparse, httplib import allmydata # for __full_version__ from allmydata.util.encodingutil import quote_output from allmydata.scripts.common import TahoeError - +from socket import error as socket_error # copied from twisted/web/client.py def parse_url(url, defaultPort=None): @@ -26,6 +27,13 @@ path = "/" return scheme, host, port, path +class BadResponse(object): + def __init__(self, url, err): + self.status = -1 + self.reason = "Error trying to connect to %s: %s" % (url, err) + def read(self): + return "" + def do_http(method, url, body=""): if isinstance(body, str): @@ -53,11 +61,15 @@ c.putheader("Connection", "close") old = body.tell() - body.seek(0, 2) + body.seek(0, os.SEEK_END) length = body.tell() body.seek(old) c.putheader("Content-Length", str(length)) - c.endheaders() + + try: + c.endheaders() + except socket_error, err: + return BadResponse(url, err) while True: data = body.read(8192) diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/create_node.py tahoe-lafs-1.10.0/src/allmydata/scripts/create_node.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/create_node.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/create_node.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,11 +1,11 @@ import os, sys -from allmydata.scripts.common import BasedirMixin, BaseOptions +from allmydata.scripts.common import BasedirOptions from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, argv_to_unicode, quote_output import allmydata -class CreateClientOptions(BasedirMixin, BaseOptions): +class CreateClientOptions(BasedirOptions): optParameters = [ # we provide 'create-node'-time options for the most common # configuration knobs. The rest can be controlled by editing @@ -17,7 +17,7 @@ ] def getSynopsis(self): - return "Usage: %s create-client [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] create-client [options] [NODEDIR]" % (self.command_name,) class CreateNodeOptions(CreateClientOptions): @@ -26,18 +26,14 @@ ] def getSynopsis(self): - return "Usage: %s create-node [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] create-node [options] [NODEDIR]" % (self.command_name,) -class CreateIntroducerOptions(BasedirMixin, BaseOptions): +class CreateIntroducerOptions(BasedirOptions): default_nodedir = None - optParameters = [ - ["node-directory", "d", None, "Specify which directory the introducer should be created in. 
[no default]"], - ] - def getSynopsis(self): - return "Usage: %s create-introducer [options] NODEDIR" % (self.command_name,) + return "Usage: %s [global-opts] create-introducer [options] NODEDIR" % (self.command_name,) client_tac = """ diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/debug.py tahoe-lafs-1.10.0/src/allmydata/scripts/debug.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/debug.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/debug.py 2013-09-03 15:38:27.000000000 +0000 @@ -5,11 +5,13 @@ from twisted.python import usage, failure from twisted.internet import defer from twisted.scripts import trial as twisted_trial +from foolscap.logging import cli as foolscap_cli +from allmydata.scripts.common import BaseOptions -class DumpOptions(usage.Options): +class DumpOptions(BaseOptions): def getSynopsis(self): - return "Usage: tahoe debug dump-share SHARE_FILENAME" + return "Usage: tahoe [global-opts] debug dump-share SHARE_FILENAME" optFlags = [ ["offsets", None, "Display a table of section offsets."], @@ -17,7 +19,7 @@ ] def getUsage(self, width=None): - t = usage.Options.getUsage(self, width) + t = BaseOptions.getUsage(self, width) t += """ Print lots of information about the given share, by parsing the share's contents. This includes share type, lease information, encoding parameters, @@ -404,9 +406,9 @@ -class DumpCapOptions(usage.Options): +class DumpCapOptions(BaseOptions): def getSynopsis(self): - return "Usage: tahoe debug dump-cap [options] FILECAP" + return "Usage: tahoe [global-opts] debug dump-cap [options] FILECAP" optParameters = [ ["nodeid", "n", None, "Specify the storage server nodeid (ASCII), to construct WE and secrets."], @@ -419,7 +421,7 @@ self.cap = cap def getUsage(self, width=None): - t = usage.Options.getUsage(self, width) + t = BaseOptions.getUsage(self, width) t += """ Print information about the given cap-string (aka: URI, file-cap, dir-cap, read-cap, write-cap). The URI string is parsed and unpacked. This prints the @@ -606,9 +608,9 @@ else: print >>out, "unknown cap type" -class FindSharesOptions(usage.Options): +class FindSharesOptions(BaseOptions): def getSynopsis(self): - return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.." + return "Usage: tahoe [global-opts] debug find-shares STORAGE_INDEX NODEDIRS.." def parseArgs(self, storage_index_s, *nodedirs): from allmydata.util.encodingutil import argv_to_abspath @@ -616,7 +618,7 @@ self.nodedirs = map(argv_to_abspath, nodedirs) def getUsage(self, width=None): - t = usage.Options.getUsage(self, width) + t = BaseOptions.getUsage(self, width) t += """ Locate all shares for the given storage index. This command looks through one or more node directories to find the shares. It returns a list of filenames, @@ -656,7 +658,7 @@ return 0 -class CatalogSharesOptions(usage.Options): +class CatalogSharesOptions(BaseOptions): """ """ @@ -667,10 +669,10 @@ raise usage.UsageError("must specify at least one node directory") def getSynopsis(self): - return "Usage: tahoe debug catalog-shares NODEDIRS.." + return "Usage: tahoe [global-opts] debug catalog-shares NODEDIRS.." def getUsage(self, width=None): - t = usage.Options.getUsage(self, width) + t = BaseOptions.getUsage(self, width) t += """ Locate all shares in the given node directories, and emit a one-line summary of each share. 
Run it like this: @@ -878,16 +880,16 @@ print >>err, "Error processing %s" % quote_output(si_dir) failure.Failure().printTraceback(err) -class CorruptShareOptions(usage.Options): +class CorruptShareOptions(BaseOptions): def getSynopsis(self): - return "Usage: tahoe debug corrupt-share SHARE_FILENAME" + return "Usage: tahoe [global-opts] debug corrupt-share SHARE_FILENAME" optParameters = [ ["offset", "o", "block-random", "Specify which bit to flip."], ] def getUsage(self, width=None): - t = usage.Options.getUsage(self, width) + t = BaseOptions.getUsage(self, width) t += """ Corrupt the given share by flipping a bit. This will cause a verifying/downloading client to log an integrity-check failure incident, and @@ -958,9 +960,9 @@ -class ReplOptions(usage.Options): +class ReplOptions(BaseOptions): def getSynopsis(self): - return "Usage: tahoe debug repl" + return "Usage: tahoe [global-opts] debug repl" def repl(options): import code @@ -971,7 +973,7 @@ class TrialOptions(twisted_trial.Options): def getSynopsis(self): - return "Usage: tahoe debug trial [options] [[file|package|module|TestCase|testmethod]...]" + return "Usage: tahoe [global-opts] debug trial [options] [[file|package|module|TestCase|testmethod]...]" def parseOptions(self, all_subargs, *a, **kw): self.trial_args = list(all_subargs) @@ -998,7 +1000,50 @@ twisted_trial.run() -class DebugCommand(usage.Options): +def fixOptionsClass( (subcmd, shortcut, OptionsClass, desc) ): + class FixedOptionsClass(OptionsClass): + def getSynopsis(self): + t = OptionsClass.getSynopsis(self) + i = t.find("Usage: flogtool ") + if i >= 0: + return "Usage: tahoe [global-opts] debug flogtool " + t[i+len("Usage: flogtool "):] + else: + return "Usage: tahoe [global-opts] debug flogtool %s [options]" % (subcmd,) + return (subcmd, shortcut, FixedOptionsClass, desc) + +class FlogtoolOptions(foolscap_cli.Options): + def __init__(self): + super(FlogtoolOptions, self).__init__() + self.subCommands = map(fixOptionsClass, self.subCommands) + + def getSynopsis(self): + return "Usage: tahoe [global-opts] debug flogtool (%s) [command options]" % ("|".join([x[0] for x in self.subCommands])) + + def parseOptions(self, all_subargs, *a, **kw): + self.flogtool_args = list(all_subargs) + return super(FlogtoolOptions, self).parseOptions(self.flogtool_args, *a, **kw) + + def getUsage(self, width=None): + t = super(FlogtoolOptions, self).getUsage(width) + t += """ +The 'tahoe debug flogtool' command uses the correct imports for this instance +of Tahoe-LAFS. + +Please run 'tahoe debug flogtool SUBCOMMAND --help' for more details on each +subcommand. 
+""" + return t + + def opt_help(self): + print str(self) + sys.exit(0) + +def flogtool(config): + sys.argv = ['flogtool'] + config.flogtool_args + return foolscap_cli.run_flogtool() + + +class DebugCommand(BaseOptions): subCommands = [ ["dump-share", None, DumpOptions, "Unpack and display the contents of a share (uri_extension and leases)."], @@ -1008,15 +1053,16 @@ ["corrupt-share", None, CorruptShareOptions, "Corrupt a share by flipping a bit."], ["repl", None, ReplOptions, "Open a Python interpreter."], ["trial", None, TrialOptions, "Run tests using Twisted Trial with the right imports."], + ["flogtool", None, FlogtoolOptions, "Utilities to access log files."], ] def postOptions(self): if not hasattr(self, 'subOptions'): raise usage.UsageError("must specify a subcommand") def getSynopsis(self): - return "Usage: tahoe debug SUBCOMMAND" + return "" def getUsage(self, width=None): - #t = usage.Options.getUsage(self, width) - t = """ + #t = BaseOptions.getUsage(self, width) + t = """Usage: tahoe debug SUBCOMMAND Subcommands: tahoe debug dump-share Unpack and display the contents of a share. tahoe debug dump-cap Unpack a read-cap or write-cap. @@ -1025,6 +1071,7 @@ tahoe debug corrupt-share Corrupt a share by flipping a bit. tahoe debug repl Open a Python interpreter. tahoe debug trial Run tests using Twisted Trial with the right imports. + tahoe debug flogtool Utilities to access log files. Please run e.g. 'tahoe debug dump-share --help' for more details on each subcommand. @@ -1065,6 +1112,7 @@ "corrupt-share": corrupt_share, "repl": repl, "trial": trial, + "flogtool": flogtool, } diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/keygen.py tahoe-lafs-1.10.0/src/allmydata/scripts/keygen.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/keygen.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/keygen.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,18 +1,14 @@ import os, sys -from allmydata.scripts.common import BasedirMixin, BaseOptions +from allmydata.scripts.common import BasedirOptions from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, quote_output -class CreateKeyGeneratorOptions(BasedirMixin, BaseOptions): +class CreateKeyGeneratorOptions(BasedirOptions): default_nodedir = None - optParameters = [ - ["node-directory", "d", None, "Specify which directory the key-generator should be created in. 
[no default]"], - ] - def getSynopsis(self): - return "Usage: %s create-key-generator [options] NODEDIR" % (self.command_name,) + return "Usage: %s [global-opts] create-key-generator [options] NODEDIR" % (self.command_name,) keygen_tac = """ diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/runner.py tahoe-lafs-1.10.0/src/allmydata/scripts/runner.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/runner.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/runner.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,11 +1,11 @@ -import sys +import os, sys from cStringIO import StringIO from twisted.python import usage -from allmydata.scripts.common import BaseOptions -from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer +from allmydata.scripts.common import get_default_nodedir +from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, admin from allmydata.util.encodingutil import quote_output, get_io_encoding def GROUP(s): @@ -15,12 +15,30 @@ return [("\n" + s, None, None, None)] -class Options(BaseOptions, usage.Options): +_default_nodedir = get_default_nodedir() + +NODEDIR_HELP = ("Specify which Tahoe node directory should be used. The " + "directory should either contain a full Tahoe node, or a " + "file named node.url that points to some other Tahoe node. " + "It should also contain a file named '" + + os.path.join('private', 'aliases') + + "' which contains the mapping from alias name to root " + "dirnode URI.") +if _default_nodedir: + NODEDIR_HELP += " [default for most commands: " + quote_output(_default_nodedir) + "]" + +class Options(usage.Options): + # unit tests can override these to point at StringIO instances + stdin = sys.stdin + stdout = sys.stdout + stderr = sys.stderr + synopsis = "\nUsage: tahoe [command options]" subCommands = ( GROUP("Administration") + create_node.subCommands + keygen.subCommands + stats_gatherer.subCommands + + admin.subCommands + GROUP("Controlling a node") + startstop_node.subCommands + GROUP("Debugging") @@ -29,6 +47,28 @@ + cli.subCommands ) + optFlags = [ + ["quiet", "q", "Operate silently."], + ["version", "V", "Display version numbers."], + ["version-and-path", None, "Display version numbers and paths to their locations."], + ] + optParameters = [ + ["node-directory", "d", None, NODEDIR_HELP], + ] + + def opt_version(self): + import allmydata + print >>self.stdout, allmydata.get_package_versions_string(debug=True) + self.no_command_needed = True + + def opt_version_and_path(self): + import allmydata + print >>self.stdout, allmydata.get_package_versions_string(show_paths=True, debug=True) + self.no_command_needed = True + + def getSynopsis(self): + return "\nUsage: tahoe [global-opts] [command-options]" + def getUsage(self, **kwargs): t = usage.Options.getUsage(self, **kwargs) return t + "\nPlease run 'tahoe --help' for more details on each command.\n" @@ -95,6 +135,8 @@ rc = startstop_node.dispatch[command](so, stdout, stderr) elif command in debug.dispatch: rc = debug.dispatch[command](so) + elif command in admin.dispatch: + rc = admin.dispatch[command](so) elif command in cli.dispatch: rc = cli.dispatch[command](so) elif command in ac_dispatch: @@ -106,9 +148,15 @@ def run(install_node_control=True): - if sys.platform == "win32": - from allmydata.windows.fixups import initialize - initialize() + try: + if sys.platform == "win32": + from allmydata.windows.fixups import initialize + initialize() + + rc = runner(sys.argv[1:], 
install_node_control=install_node_control) + except Exception: + import traceback + traceback.print_exc() + rc = 1 - rc = runner(sys.argv[1:], install_node_control=install_node_control) sys.exit(rc) diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/startstop_node.py tahoe-lafs-1.10.0/src/allmydata/scripts/startstop_node.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/startstop_node.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/startstop_node.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,45 +1,41 @@ import os, sys, signal, time -from allmydata.scripts.common import BasedirMixin, BaseOptions +from allmydata.scripts.common import BasedirOptions from allmydata.util import fileutil from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, quote_output -class StartOptions(BasedirMixin, BaseOptions): +class StartOptions(BasedirOptions): optFlags = [ ["profile", "p", "Run under the Python profiler, putting results in 'profiling_results.prof'."], ["syslog", None, "Tell the node to log to syslog, not a file."], ] def getSynopsis(self): - return "Usage: %s start [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] start [options] [NODEDIR]" % (self.command_name,) -class StopOptions(BasedirMixin, BaseOptions): +class StopOptions(BasedirOptions): def getSynopsis(self): - return "Usage: %s stop [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] stop [options] [NODEDIR]" % (self.command_name,) -class RestartOptions(BasedirMixin, BaseOptions): +class RestartOptions(BasedirOptions): optFlags = [ ["profile", "p", "Run under the Python profiler, putting results in 'profiling_results.prof'."], ["syslog", None, "Tell the node to log to syslog, not a file."], ] def getSynopsis(self): - return "Usage: %s restart [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] restart [options] [NODEDIR]" % (self.command_name,) -class RunOptions(BasedirMixin, BaseOptions): +class RunOptions(BasedirOptions): default_nodedir = u"." - optParameters = [ - ["node-directory", "d", None, "Specify the directory of the node to be run. [default, for 'tahoe run' only: current directory]"], - ] - def getSynopsis(self): - return "Usage: %s run [options] [NODEDIR]" % (self.command_name,) + return "Usage: %s [global-opts] run [options] [NODEDIR]" % (self.command_name,) def start(opts, out=sys.stdout, err=sys.stderr): diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/stats_gatherer.py tahoe-lafs-1.10.0/src/allmydata/scripts/stats_gatherer.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/stats_gatherer.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/stats_gatherer.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,18 +1,14 @@ import os, sys -from allmydata.scripts.common import BasedirMixin, BaseOptions +from allmydata.scripts.common import BasedirOptions from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, quote_output -class CreateStatsGathererOptions(BasedirMixin, BaseOptions): +class CreateStatsGathererOptions(BasedirOptions): default_nodedir = None - optParameters = [ - ["node-directory", "d", None, "Specify which directory the stats-gatherer should be created in. 
[no default]"], - ] - def getSynopsis(self): - return "Usage: %s create-stats-gatherer [options] NODEDIR" % (self.command_name,) + return "Usage: %s [global-opts] create-stats-gatherer [options] NODEDIR" % (self.command_name,) stats_gatherer_tac = """ diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_check.py tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_check.py --- tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_check.py 2012-05-14 02:07:24.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_check.py 2013-09-03 15:38:27.000000000 +0000 @@ -77,19 +77,26 @@ else: stdout.write(" repair failed\n") else: - stdout.write("Summary: %s\n" % quote_output(data["summary"], quotemarks=False)) + # LIT files and directories do not have a "summary" field. + summary = data.get("summary", "Healthy (LIT)") + stdout.write("Summary: %s\n" % quote_output(summary, quotemarks=False)) cr = data["results"] stdout.write(" storage index: %s\n" % quote_output(data["storage-index"], quotemarks=False)) - stdout.write(" good-shares: %r (encoding is %r-of-%r)\n" - % (cr["count-shares-good"], - cr["count-shares-needed"], - cr["count-shares-expected"])) - stdout.write(" wrong-shares: %r\n" % cr["count-wrong-shares"]) - corrupt = cr["list-corrupt-shares"] + + if all([field in cr for field in ("count-shares-good", "count-shares-needed", + "count-shares-expected", "count-wrong-shares")]): + stdout.write(" good-shares: %r (encoding is %r-of-%r)\n" + % (cr["count-shares-good"], + cr["count-shares-needed"], + cr["count-shares-expected"])) + stdout.write(" wrong-shares: %r\n" % cr["count-wrong-shares"]) + + corrupt = cr.get("list-corrupt-shares", []) if corrupt: stdout.write(" corrupt shares:\n") for (serverid, storage_index, sharenum) in corrupt: stdout.write(" %s\n" % _quote_serverid_index_share(serverid, storage_index, sharenum)) + return 0 @@ -138,6 +145,8 @@ path = d["path"] if not path: path = [""] + + # LIT files and directories do not have a "summary" field. 
         summary = cr.get("summary", "Healthy (LIT)")
         print >>stdout, "%s: %s" % (quote_path(path), quote_output(summary, quotemarks=False))
diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_cp.py tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_cp.py
--- tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_cp.py 2012-05-14 02:07:24.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_cp.py 2013-09-03 15:38:27.000000000 +0000
@@ -103,7 +103,7 @@
         self.children = {}
         children = listdir_unicode(self.pathname)
         for i,n in enumerate(children):
-            self.progressfunc("examining %d of %d" % (i, len(children)))
+            self.progressfunc("examining %d of %d" % (i+1, len(children)))
             pn = os.path.join(self.pathname, n)
             if os.path.isdir(pn):
                 child = LocalDirectorySource(self.progressfunc, pn)
@@ -131,7 +131,7 @@
         self.children = {}
         children = listdir_unicode(self.pathname)
         for i,n in enumerate(children):
-            self.progressfunc("examining %d of %d" % (i, len(children)))
+            self.progressfunc("examining %d of %d" % (i+1, len(children)))
             n = unicode(n)
             pn = os.path.join(self.pathname, n)
             if os.path.isdir(pn):
@@ -239,7 +239,7 @@
             return
         self.children = {}
         for i,(name, data) in enumerate(self.children_d.items()):
-            self.progressfunc("examining %d of %d" % (i, len(self.children_d)))
+            self.progressfunc("examining %d of %d" % (i+1, len(self.children_d)))
             if data[0] == "filenode":
                 mutable = data[1].get("mutable", False)
                 writecap = to_str(data[1].get("rw_uri"))
@@ -333,7 +333,7 @@
             return
         self.children = {}
         for i,(name, data) in enumerate(self.children_d.items()):
-            self.progressfunc("examining %d of %d" % (i, len(self.children_d)))
+            self.progressfunc("examining %d of %d" % (i+1, len(self.children_d)))
             if data[0] == "filenode":
                 mutable = data[1].get("mutable", False)
                 writecap = to_str(data[1].get("rw_uri"))
@@ -652,7 +652,6 @@
 
         for (name,s) in source_files:
             self.attach_to_target(s, name, target)
-            self.files_to_copy += 1
 
         for source in source_dirs:
             self.assign_targets(source, target)
diff -Nru tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_ls.py tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_ls.py
--- tahoe-lafs-1.9.2/src/allmydata/scripts/tahoe_ls.py 2012-05-14 02:07:24.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/scripts/tahoe_ls.py 2013-09-03 15:38:27.000000000 +0000
@@ -47,14 +47,16 @@
             print >>stdout, data
             return 0
         else:
-            print >>stderr, "The JSON response contained unprintable characters:\n%s" % quote_output(data)
+            print >>stderr, "The JSON response contained unprintable characters:"
+            print >>stderr, quote_output(data, quotemarks=False)
             return 1
 
     try:
         parsed = simplejson.loads(data)
     except Exception, e:
         print >>stderr, "error: %s" % quote_output(e.args[0], quotemarks=False)
-        print >>stderr, "Could not parse JSON response:\n%s" % quote_output(data)
+        print >>stderr, "Could not parse JSON response:"
+        print >>stderr, quote_output(data, quotemarks=False)
         return 1
 
     nodetype, d = parsed
diff -Nru tahoe-lafs-1.9.2/src/allmydata/storage/server.py tahoe-lafs-1.10.0/src/allmydata/storage/server.py
--- tahoe-lafs-1.9.2/src/allmydata/storage/server.py 2012-06-22 15:13:40.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/storage/server.py 2013-09-03 15:38:27.000000000 +0000
@@ -100,6 +100,11 @@
     def __repr__(self):
         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
 
+    def have_shares(self):
+        # quick test to decide if we need to commit to an implicit
+        # permutation-seed or if we should use a new one
+        return bool(set(os.listdir(self.sharedir)) - set(["incoming"]))
+
     def add_bucket_counter(self):
         statefile = os.path.join(self.storedir, "bucket_counter.state")
         self.bucket_counter = BucketCountingCrawler(self, statefile)
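NOTE (not part of the patch): the storage_client.py changes below rework server identity. When an announcement is signed, its "v0-" public-key string becomes the server ID (the first 8 characters after the prefix become the display name); otherwise the tubid parsed out of the anonymous-storage-FURL is used, and the permutation seed now always comes from the announcement. A rough sketch of that parsing; the FURL and seed values here are made up:

    import re
    from allmydata.util import base32

    ann = {"anonymous-storage-FURL":
               "pb://rvfvefda6ccxjdv4hukzu2mlgfk56kam@example.net:1234/swiss",
           "permutation-seed-base32": "rvfvefda6ccxjdv4hukzu2mlgfk56kam"}

    m = re.match(r'pb://(\w+)@', str(ann["anonymous-storage-FURL"]))
    tubid_s = m.group(1).lower()       # base32 tubid from the FURL
    serverid = base32.a2b(tubid_s)     # binary fallback serverid
    seed = base32.a2b(str(ann["permutation-seed-base32"]))

    key_s = None                       # unsigned announcement in this example
    short_name = key_s[3:3+8] if key_s else tubid_s[:6]   # -> "rvfvef"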
diff -Nru tahoe-lafs-1.9.2/src/allmydata/storage_client.py tahoe-lafs-1.10.0/src/allmydata/storage_client.py
--- tahoe-lafs-1.9.2/src/allmydata/storage_client.py 2012-06-22 15:18:35.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/storage_client.py 2013-09-03 15:38:27.000000000 +0000
@@ -29,11 +29,11 @@
 # 6: implement other sorts of IStorageClient classes: S3, etc
 
 
-import time
+import re, time
 from zope.interface import implements
 from foolscap.api import eventually
-from allmydata.interfaces import IStorageBroker, IServer
-from allmydata.util import idlib, log
+from allmydata.interfaces import IStorageBroker, IDisplayableServer, IServer
+from allmydata.util import log, base32
 from allmydata.util.assertutil import precondition
 from allmydata.util.rrefutil import add_version_to_remote_reference
 from allmydata.util.hashutil import sha1
@@ -74,8 +74,8 @@
         self.introducer_client = None
 
     # these two are used in unit tests
-    def test_add_rref(self, serverid, rref):
-        s = NativeStorageServer(serverid, {})
+    def test_add_rref(self, serverid, rref, ann):
+        s = NativeStorageServer(serverid, ann.copy())
         s.rref = rref
         s._is_connected = True
         self.servers[serverid] = s
@@ -87,21 +87,23 @@
         self.introducer_client = ic = introducer_client
         ic.subscribe_to("storage", self._got_announcement)
 
-    def _got_announcement(self, serverid, ann_d):
-        precondition(isinstance(serverid, str), serverid)
-        precondition(len(serverid) == 20, serverid)
-        assert ann_d["service-name"] == "storage"
+    def _got_announcement(self, key_s, ann):
+        if key_s is not None:
+            precondition(isinstance(key_s, str), key_s)
+            precondition(key_s.startswith("v0-"), key_s)
+        assert ann["service-name"] == "storage"
+        s = NativeStorageServer(key_s, ann)
+        serverid = s.get_serverid()
         old = self.servers.get(serverid)
         if old:
-            if old.get_announcement() == ann_d:
+            if old.get_announcement() == ann:
                 return # duplicate
             # replacement
             del self.servers[serverid]
             old.stop_connecting()
             # now we forget about them and start using the new one
-        dsc = NativeStorageServer(serverid, ann_d)
-        self.servers[serverid] = dsc
-        dsc.start_connecting(self.tub, self._trigger_connections)
+        self.servers[serverid] = s
+        s.start_connecting(self.tub, self._trigger_connections)
         # the descriptor will manage their own Reconnector, and each time we
         # need servers, we'll ask them if they're connected or not.
 
@@ -138,6 +140,24 @@
             return self.servers[serverid].get_nickname()
         return None
 
+    def get_stub_server(self, serverid):
+        if serverid in self.servers:
+            return self.servers[serverid]
+        return StubServer(serverid)
+
+class StubServer:
+    implements(IDisplayableServer)
+    def __init__(self, serverid):
+        self.serverid = serverid # binary tubid
+    def get_serverid(self):
+        return self.serverid
+    def get_name(self):
+        return base32.b2a(self.serverid)[:8]
+    def get_longname(self):
+        return base32.b2a(self.serverid)
+    def get_nickname(self):
+        return "?"
+
 class NativeStorageServer:
     """I hold information about a storage server that we want to connect to.
If we are connected, I hold the RemoteReference, their host address, and @@ -158,7 +178,7 @@ VERSION_DEFAULTS = { "http://allmydata.org/tahoe/protocols/storage/v1" : - { "maximum-immutable-share-size": 2**32, + { "maximum-immutable-share-size": 2**32 - 1, "maximum-mutable-share-size": 2*1000*1000*1000, # maximum prior to v1.9.2 "tolerates-immutable-read-overrun": False, "delete-mutable-shares-with-zero-length-writev": False, @@ -166,13 +186,32 @@ "application-version": "unknown: no get_version()", } - def __init__(self, serverid, ann_d, min_shares=1): - self.serverid = serverid - self._tubid = serverid - self.announcement = ann_d + def __init__(self, key_s, ann, min_shares=1): + self.key_s = key_s + self.announcement = ann self.min_shares = min_shares - self.serverid_s = idlib.shortnodeid_b2a(self.serverid) + assert "anonymous-storage-FURL" in ann, ann + furl = str(ann["anonymous-storage-FURL"]) + m = re.match(r'pb://(\w+)@', furl) + assert m, furl + tubid_s = m.group(1).lower() + self._tubid = base32.a2b(tubid_s) + assert "permutation-seed-base32" in ann, ann + ps = base32.a2b(str(ann["permutation-seed-base32"])) + self._permutation_seed = ps + + if key_s: + self._long_description = key_s + if key_s.startswith("v0-"): + # remove v0- prefix from abbreviated name + self._short_description = key_s[3:3+8] + else: + self._short_description = key_s[:8] + else: + self._long_description = tubid_s + self._short_description = tubid_s[:6] + self.announcement_time = time.time() self.last_connect_time = None self.last_loss_time = None @@ -194,24 +233,26 @@ def __repr__(self): return "" % self.get_name() def get_serverid(self): - return self._tubid + return self._tubid # XXX replace with self.key_s def get_permutation_seed(self): - return self._tubid + return self._permutation_seed def get_version(self): if self.rref: return self.rref.version return None def get_name(self): # keep methodname short - return self.serverid_s + # TODO: decide who adds [] in the short description. It should + # probably be the output side, not here. 
+        return self._short_description
     def get_longname(self):
-        return idlib.nodeid_b2a(self._tubid)
+        return self._long_description
     def get_lease_seed(self):
         return self._tubid
     def get_foolscap_write_enabler_seed(self):
         return self._tubid
     def get_nickname(self):
-        return self.announcement["nickname"].decode("utf-8")
+        return self.announcement["nickname"]
     def get_announcement(self):
         return self.announcement
     def get_remote_host(self):
@@ -226,7 +267,7 @@
         return self.announcement_time

     def start_connecting(self, tub, trigger_cb):
-        furl = self.announcement["FURL"]
+        furl = str(self.announcement["anonymous-storage-FURL"])
         self._trigger_cb = trigger_cb
         self._reconnector = tub.connectTo(furl, self._got_connection)
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/check_memory.py tahoe-lafs-1.10.0/src/allmydata/test/check_memory.py
--- tahoe-lafs-1.9.2/src/allmydata/test/check_memory.py 2012-05-14 02:07:24.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/check_memory.py 2013-09-03 15:38:27.000000000 +0000
@@ -404,7 +404,7 @@
             d.addCallback(lambda res:
                           u.upload(upload.FileName(files[name],
                                                    convergence="check-memory")))
-            d.addCallback(lambda results: results.uri)
+            d.addCallback(lambda results: results.get_uri())
         else:
             raise ValueError("unknown mode=%s" % self.mode)
         def _complete(uri):
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/common.py tahoe-lafs-1.10.0/src/allmydata/test/common.py
--- tahoe-lafs-1.9.2/src/allmydata/test/common.py 2012-06-14 20:09:14.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/common.py 2013-09-03 15:38:27.000000000 +0000
@@ -14,6 +14,7 @@
      MDMF_VERSION
 from allmydata.check_results import CheckResults, CheckAndRepairResults, \
      DeepCheckResults, DeepCheckAndRepairResults
+from allmydata.storage_client import StubServer
 from allmydata.mutable.layout import unpack_header
 from allmydata.mutable.publish import MutableData
 from allmydata.storage.mutable import MutableShareFile
@@ -66,24 +67,27 @@
         return self.storage_index

     def check(self, monitor, verify=False, add_lease=False):
-        r = CheckResults(self.my_uri, self.storage_index)
-        data = {}
-        data["count-shares-needed"] = 3
-        data["count-shares-expected"] = 10
-        data["count-good-share-hosts"] = 10
-        data["count-wrong-shares"] = 0
-        nodeid = "\x00"*20
-        data["list-corrupt-shares"] = []
-        data["sharemap"] = {1: [nodeid]}
-        data["servers-responding"] = [nodeid]
-        data["count-recoverable-versions"] = 1
-        data["count-unrecoverable-versions"] = 0
-        r.set_healthy(True)
-        r.set_recoverable(True)
-        data["count-shares-good"] = 10
-        r.problems = []
-        r.set_data(data)
-        r.set_needs_rebalancing(False)
+        s = StubServer("\x00"*20)
+        r = CheckResults(self.my_uri, self.storage_index,
+                         healthy=True, recoverable=True,
+                         needs_rebalancing=False,
+                         count_shares_needed=3,
+                         count_shares_expected=10,
+                         count_shares_good=10,
+                         count_good_share_hosts=10,
+                         count_recoverable_versions=1,
+                         count_unrecoverable_versions=0,
+                         servers_responding=[s],
+                         sharemap={1: [s]},
+                         count_wrong_shares=0,
+                         list_corrupt_shares=[],
+                         count_corrupt_shares=0,
+                         list_incompatible_shares=[],
+                         count_incompatible_shares=0,
+                         summary="",
+                         report=[],
+                         share_problems=[],
+                         servermap=None)
         return defer.succeed(r)
     def check_and_repair(self, monitor, verify=False, add_lease=False):
         d = self.check(verify)
@@ -113,6 +117,8 @@
         except KeyError, le:
             raise NotEnoughSharesError(le, 0, 3)
         return len(data)
+    def get_current_size(self):
+        return defer.succeed(self.get_size())
     def read(self, consumer, offset=0, size=None):
         # we don't bother to call registerProducer/unregisterProducer,
@@ -273,24 +279,27 @@
         return self.file_types[self.storage_index]

     def check(self, monitor, verify=False, add_lease=False):
-        r = CheckResults(self.my_uri, self.storage_index)
-        data = {}
-        data["count-shares-needed"] = 3
-        data["count-shares-expected"] = 10
-        data["count-good-share-hosts"] = 10
-        data["count-wrong-shares"] = 0
-        data["list-corrupt-shares"] = []
-        nodeid = "\x00"*20
-        data["sharemap"] = {"seq1-abcd-sh0": [nodeid]}
-        data["servers-responding"] = [nodeid]
-        data["count-recoverable-versions"] = 1
-        data["count-unrecoverable-versions"] = 0
-        r.set_healthy(True)
-        r.set_recoverable(True)
-        data["count-shares-good"] = 10
-        r.problems = []
-        r.set_data(data)
-        r.set_needs_rebalancing(False)
+        s = StubServer("\x00"*20)
+        r = CheckResults(self.my_uri, self.storage_index,
+                         healthy=True, recoverable=True,
+                         needs_rebalancing=False,
+                         count_shares_needed=3,
+                         count_shares_expected=10,
+                         count_shares_good=10,
+                         count_good_share_hosts=10,
+                         count_recoverable_versions=1,
+                         count_unrecoverable_versions=0,
+                         servers_responding=[s],
+                         sharemap={"seq1-abcd-sh0": [s]},
+                         count_wrong_shares=0,
+                         list_corrupt_shares=[],
+                         count_corrupt_shares=0,
+                         list_incompatible_shares=[],
+                         count_incompatible_shares=0,
+                         summary="",
+                         report=[],
+                         share_problems=[],
+                         servermap=None)
         return defer.succeed(r)

     def check_and_repair(self, monitor, verify=False, add_lease=False):
@@ -401,7 +410,7 @@
             fingerprint=os.urandom(32)).to_string()

 def create_mutable_filenode(contents, mdmf=False, all_contents=None):
-    # XXX: All of these arguments are kind of stupid. 
+    # XXX: All of these arguments are kind of stupid.
     if mdmf:
         cap = make_mdmf_mutable_file_cap()
     else:
@@ -461,9 +470,10 @@
         iv_dir = self.getdir("introducer")
         if not os.path.isdir(iv_dir):
             fileutil.make_dirs(iv_dir)
-            fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'), \
-                           "[node]\n" + \
-                           "web.port = tcp:0:interface=127.0.0.1\n")
+            fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
+                           "[node]\n" +
+                           u"nickname = introducer \u263A\n".encode('utf-8') +
+                           "web.port = tcp:0:interface=127.0.0.1\n")
         if SYSTEM_TEST_CERTS:
             os.mkdir(os.path.join(iv_dir, "private"))
             f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
@@ -544,21 +554,26 @@
             if self.stats_gatherer_furl:
                 config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl

+            nodeconfig = "[node]\n"
+            nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
+
             if i == 0:
                 # clients[0] runs a webserver and a helper, no key_generator
-                config += "[node]\n"
+                config += nodeconfig
                 config += "web.port = tcp:0:interface=127.0.0.1\n"
                 config += "timeout.keepalive = 600\n"
                 config += "[helper]\n"
                 config += "enabled = True\n"
-            if i == 3:
+            elif i == 3:
                 # clients[3] runs a webserver and uses a helper, uses
                 # key_generator
                 if self.key_generator_furl:
                     config += "key_generator.furl = %s\n" % self.key_generator_furl
-                config += "[node]\n"
+                config += nodeconfig
                 config += "web.port = tcp:0:interface=127.0.0.1\n"
                 config += "timeout.disconnect = 1800\n"
+            else:
+                config += nodeconfig

             fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/common_util.py tahoe-lafs-1.10.0/src/allmydata/test/common_util.py
--- tahoe-lafs-1.9.2/src/allmydata/test/common_util.py 2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/common_util.py 2013-09-03 15:38:27.000000000 +0000
@@ -45,8 +45,11 @@
             try:
                 fileutil.rm_dir(dirpath)
             finally:
-                log.err("We were unable to delete a non-ASCII directory %r created by the test. "
-                        "This is liable to cause failures on future builds." % (dirpath,))
+                if os.path.exists(dirpath):
+                    msg = ("We were unable to delete a non-ASCII directory %r created by the test. "
+                           "This is liable to cause failures on future builds." % (dirpath,))
+                    print msg
+                    log.err(msg)
         self.addCleanup(_cleanup)
         os.mkdir(dirpath)
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/common_web.py tahoe-lafs-1.10.0/src/allmydata/test/common_web.py
--- tahoe-lafs-1.9.2/src/allmydata/test/common_web.py 2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/common_web.py 2013-09-03 15:38:27.000000000 +0000
@@ -61,7 +61,8 @@

 class MyGetter(client.HTTPPageGetter):
-    handleStatus_206 = lambda self: self.handleStatus_200()
+    handleStatus_206 = lambda self: self.handleStatus_200() # PARTIAL_CONTENT
+    handleStatus_304 = lambda self: self.handleStatus_200() # NOT_MODIFIED

 class HTTPClientHEADFactory(client.HTTPClientFactory):
     protocol = MyGetter
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/no_network.py tahoe-lafs-1.10.0/src/allmydata/test/no_network.py
--- tahoe-lafs-1.9.2/src/allmydata/test/no_network.py 2012-05-17 00:16:42.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/no_network.py 2013-09-03 15:38:27.000000000 +0000
@@ -43,6 +43,10 @@
         self.hung_until = None
         self.post_call_notifier = None
        self.disconnectors = {}
+        self.counter_by_methname = {}
+
+    def _clear_counters(self):
+        self.counter_by_methname = {}

     def callRemoteOnly(self, methname, *args, **kwargs):
         d = self.callRemote(methname, *args, **kwargs)
@@ -62,6 +66,8 @@
         kwargs = dict([(k,wrap(kwargs[k])) for k in kwargs])

         def _really_call():
+            def incr(d, k): d[k] = d.setdefault(k, 0) + 1
+            incr(self.counter_by_methname, methname)
             meth = getattr(self.original, "remote_" + methname)
             return meth(*args, **kwargs)
@@ -320,6 +326,13 @@
             ss.hung_until.callback(None)
             ss.hung_until = None

+    def nuke_from_orbit(self):
+        """ Empty all share directories in this grid. It's the only way to be sure ;-) """
+        for server in self.servers_by_number.values():
+            for prefixdir in os.listdir(server.sharedir):
+                if prefixdir != 'incoming':
+                    fileutil.rm_dir(os.path.join(server.sharedir, prefixdir))
+

 class GridTestMixin:
     def setUp(self):
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_backupdb.py tahoe-lafs-1.10.0/src/allmydata/test/test_backupdb.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_backupdb.py 2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_backupdb.py 2013-09-03 15:38:27.000000000 +0000
@@ -9,12 +9,10 @@
 from allmydata.scripts import backupdb

 class BackupDB(unittest.TestCase):
-    def create_or_skip(self, dbfile):
+    def create(self, dbfile):
         stderr = StringIO()
         bdb = backupdb.get_backupdb(dbfile, stderr=stderr)
-        if not bdb:
-            if "I was unable to import a python sqlite library" in stderr.getvalue():
-                raise unittest.SkipTest("sqlite unavailable, skipping test")
+        self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,))
         return bdb

     def skip_if_cannot_represent_filename(self, u):
@@ -31,8 +29,7 @@
         self.basedir = basedir = os.path.join("backupdb", "create")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
         self.failUnlessEqual(bdb.VERSION, 2)

     def test_upgrade_v1_v2(self):
@@ -43,13 +40,9 @@
         created = backupdb.get_backupdb(dbfile, stderr=stderr,
                                         create_version=(backupdb.SCHEMA_v1, 1),
                                         just_create=True)
-        if not created:
-            if "I was unable to import a python sqlite library" in stderr.getvalue():
-                raise unittest.SkipTest("sqlite unavailable, skipping test")
-            self.fail("unable to create v1 backupdb")
+        self.failUnless(created, "unable to create v1 backupdb")
         # now we should have a v1 database on disk
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
         self.failUnlessEqual(bdb.VERSION, 2)

     def test_fail(self):
@@ -65,12 +58,8 @@
                                     stderr_f)
         self.failUnlessEqual(bdb, None)
         stderr = stderr_f.getvalue()
-        if "I was unable to import a python sqlite library" in stderr:
-            pass
-        else:
-            self.failUnless("backupdb file is unusable" in stderr, stderr)
-            self.failUnless("file is encrypted or is not a database" in stderr,
-                            stderr)
+        self.failUnlessIn("backupdb file is unusable", stderr)
+        self.failUnlessIn("file is encrypted or is not a database", stderr)

         # put a directory in the way, to exercise a different error path
         where = os.path.join(basedir, "roadblock-dir")
@@ -79,12 +68,8 @@
         bdb = backupdb.get_backupdb(where, stderr_f)
         self.failUnlessEqual(bdb, None)
         stderr = stderr_f.getvalue()
-        if "I was unable to import a python sqlite library" in stderr:
-            pass
-        else:
-            self.failUnless(("Unable to create/open backupdb file %s" % where)
-                            in stderr, stderr)
-            self.failUnless("unable to open database file" in stderr, stderr)
+        self.failUnlessIn("Unable to create/open backupdb file %s" % (where,), stderr)
+        self.failUnlessIn("unable to open database file", stderr)

     def writeto(self, filename, data):
@@ -98,8 +83,7 @@
         self.basedir = basedir = os.path.join("backupdb", "check")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)

         foo_fn = self.writeto("foo.txt", "foo.txt")
         blah_fn = self.writeto("bar/blah.txt", "blah.txt")
@@ -164,7 +148,7 @@
         fileutil.make_dirs(basedir)
         where = os.path.join(basedir, "tooold.db")
-        bdb = self.create_or_skip(where)
+        bdb = self.create(where)
         # reach into the DB and make it old
         bdb.cursor.execute("UPDATE version SET version=0")
         bdb.connection.commit()
@@ -182,8 +166,7 @@
         self.basedir = basedir = os.path.join("backupdb", "directory")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)

         contents = {u"file1": "URI:CHK:blah1",
                     u"file2": "URI:CHK:blah2",
@@ -245,8 +228,7 @@
         self.basedir = basedir = os.path.join("backupdb", "unicode")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)

         self.writeto(u"f\u00f6\u00f6.txt", "foo.txt")
         files = [fn for fn in listdir_unicode(unicode(basedir)) if fn.endswith(".txt")]
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_checker.py tahoe-lafs-1.10.0/src/allmydata/test/test_checker.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_checker.py 2012-05-14 02:24:31.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_checker.py 2013-09-03 15:38:27.000000000 +0000
@@ -5,6 +5,7 @@
 from twisted.internet import defer
 from allmydata import check_results, uri
 from allmydata import uri as tahoe_uri
+from allmydata.util import base32
 from allmydata.web import check_results as web_check_results
 from allmydata.storage_client import StorageFarmBroker, NativeStorageServer
 from allmydata.storage.server import storage_index_to_dir
@@ -22,19 +23,26 @@

     def create_fake_client(self):
         sb = StorageFarmBroker(None, True)
-        for (peerid, nickname) in [("\x00"*20, "peer-0"),
-                                   ("\xff"*20, "peer-f"),
-                                   ("\x11"*20, "peer-11")] :
-            ann_d = { "version": 0,
-                      "service-name": "storage",
-                      "FURL": "fake furl",
-                      "nickname": unicode(nickname),
-                      "app-versions": {}, # need #466 and v2 introducer
-                      "my-version": "ver",
-                      "oldest-supported": "oldest",
-                      }
-            s = NativeStorageServer(peerid, ann_d)
-            sb.test_add_server(peerid, s)
+        # s.get_name() (the "short description") will be "v0-00000000".
+        # s.get_longname() will include the -long suffix.
+        # s.get_peerid() (i.e. tubid) will be "aaa.." or "777.." or "ceir.."
+        servers = [("v0-00000000-long", "\x00"*20, "peer-0"),
+                   ("v0-ffffffff-long", "\xff"*20, "peer-f"),
+                   ("v0-11111111-long", "\x11"*20, "peer-11")]
+        for (key_s, peerid, nickname) in servers:
+            tubid_b32 = base32.b2a(peerid)
+            furl = "pb://%s@nowhere/fake" % tubid_b32
+            ann = { "version": 0,
+                    "service-name": "storage",
+                    "anonymous-storage-FURL": furl,
+                    "permutation-seed-base32": "",
+                    "nickname": unicode(nickname),
+                    "app-versions": {}, # need #466 and v2 introducer
+                    "my-version": "ver",
+                    "oldest-supported": "oldest",
+                    }
+            s = NativeStorageServer(key_s, ann)
+            sb.test_add_server(peerid, s) # XXX: maybe use key_s?
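(For orientation: a minimal sketch of the v2-style announcement shape exercised above, not part of the patch itself; the "nowhere" FURL and empty permutation seed are stand-in values taken from this test, and a real introducer would publish genuine ones.)

    from allmydata.util import base32
    peerid = "\x00"*20                        # 20-byte tubid
    furl = "pb://%s@nowhere/fake" % base32.b2a(peerid)
    ann = { "version": 0,
            "service-name": "storage",
            "anonymous-storage-FURL": furl,   # replaces the old "FURL" key
            "permutation-seed-base32": "",
            "nickname": u"peer-0" }
    # servers are now keyed by a pubkey-derived name, not by the tubid
    s = NativeStorageServer("v0-00000000-long", ann)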
         c = FakeClient()
         c.storage_broker = sb
         return c
@@ -70,26 +78,32 @@

     def test_check(self):
         c = self.create_fake_client()
+        sb = c.storage_broker
         serverid_1 = "\x00"*20
         serverid_f = "\xff"*20
+        server_1 = sb.get_stub_server(serverid_1)
+        server_f = sb.get_stub_server(serverid_f)
         u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)
-        cr = check_results.CheckResults(u, u.get_storage_index())
-        cr.set_healthy(True)
-        cr.set_needs_rebalancing(False)
-        cr.set_summary("groovy")
-        data = { "count-shares-needed": 3,
-                 "count-shares-expected": 9,
-                 "count-shares-good": 10,
-                 "count-good-share-hosts": 11,
-                 "list-corrupt-shares": [],
-                 "count-wrong-shares": 0,
-                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
-                 "count-recoverable-versions": 1,
-                 "count-unrecoverable-versions": 0,
-                 "servers-responding": [],
+        data = { "count_shares_needed": 3,
+                 "count_shares_expected": 9,
+                 "count_shares_good": 10,
+                 "count_good_share_hosts": 11,
+                 "count_recoverable_versions": 1,
+                 "count_unrecoverable_versions": 0,
+                 "servers_responding": [],
+                 "sharemap": {"shareid1": [server_1, server_f]},
+                 "count_wrong_shares": 0,
+                 "list_corrupt_shares": [],
+                 "count_corrupt_shares": 0,
+                 "list_incompatible_shares": [],
+                 "count_incompatible_shares": 0,
+                 "report": [], "share_problems": [], "servermap": None,
                  }
-        cr.set_data(data)
-
+        cr = check_results.CheckResults(u, u.get_storage_index(),
+                                        healthy=True, recoverable=True,
+                                        needs_rebalancing=False,
+                                        summary="groovy",
+                                        **data)
         w = web_check_results.CheckResultsRenderer(c, cr)
         html = self.render2(w)
         s = self.remove_tags(html)
@@ -101,25 +115,32 @@
         self.failUnlessIn("Wrong Shares: 0", s)
         self.failUnlessIn("Recoverable Versions: 1", s)
         self.failUnlessIn("Unrecoverable Versions: 0", s)
+        self.failUnlessIn("Good Shares (sorted in share order): Share ID Nickname Node ID shareid1 peer-0 00000000 peer-f ffffffff", s)

-        cr.set_healthy(False)
-        cr.set_recoverable(True)
-        cr.set_summary("ungroovy")
+        cr = check_results.CheckResults(u, u.get_storage_index(),
+                                        healthy=False, recoverable=True,
+                                        needs_rebalancing=False,
+                                        summary="ungroovy",
+                                        **data)
+        w = web_check_results.CheckResultsRenderer(c, cr)
         html = self.render2(w)
         s = self.remove_tags(html)
         self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
         self.failUnlessIn("Not Healthy! : ungroovy", s)

-        cr.set_healthy(False)
-        cr.set_recoverable(False)
-        cr.set_summary("rather dead")
-        data["list-corrupt-shares"] = [(serverid_1, u.get_storage_index(), 2)]
-        cr.set_data(data)
+        data["count_corrupt_shares"] = 1
+        data["list_corrupt_shares"] = [(server_1, u.get_storage_index(), 2)]
+        cr = check_results.CheckResults(u, u.get_storage_index(),
+                                        healthy=False, recoverable=False,
+                                        needs_rebalancing=False,
+                                        summary="rather dead",
+                                        **data)
+        w = web_check_results.CheckResultsRenderer(c, cr)
         html = self.render2(w)
         s = self.remove_tags(html)
         self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
         self.failUnlessIn("Not Recoverable! : rather dead", s)
-        self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", s)
+        self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 00000000", s)

         html = self.render2(w)
         s = self.remove_tags(html)
@@ -142,16 +163,14 @@
              'count-unrecoverable-versions': 0,
              'count-shares-needed': 3,
              'sharemap': {"shareid1":
-                          ["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-                           "77777777777777777777777777777777"]},
+                          ["v0-00000000-long", "v0-ffffffff-long"]},
              'count-recoverable-versions': 1,
              'list-corrupt-shares':
-             [["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-               "2k6avpjga3dho3zsjo6nnkt7n4", 2]],
+             [["v0-00000000-long", "2k6avpjga3dho3zsjo6nnkt7n4", 2]],
              'count-good-share-hosts': 11,
              'count-wrong-shares': 0,
             'count-shares-good': 10,
-             'count-corrupt-shares': 0,
+             'count-corrupt-shares': 1,
             'servers-responding': [],
             'recoverable': False,
             }
@@ -168,45 +187,54 @@

     def test_check_and_repair(self):
         c = self.create_fake_client()
+        sb = c.storage_broker
         serverid_1 = "\x00"*20
         serverid_f = "\xff"*20
         u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)
-        pre_cr = check_results.CheckResults(u, u.get_storage_index())
-        pre_cr.set_healthy(False)
-        pre_cr.set_recoverable(True)
-        pre_cr.set_needs_rebalancing(False)
-        pre_cr.set_summary("illing")
-        data = { "count-shares-needed": 3,
-                 "count-shares-expected": 10,
-                 "count-shares-good": 6,
-                 "count-good-share-hosts": 7,
-                 "list-corrupt-shares": [],
-                 "count-wrong-shares": 0,
-                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
-                 "count-recoverable-versions": 1,
-                 "count-unrecoverable-versions": 0,
-                 "servers-responding": [],
+        data = { "count_shares_needed": 3,
+                 "count_shares_expected": 10,
+                 "count_shares_good": 6,
+                 "count_good_share_hosts": 7,
+                 "count_recoverable_versions": 1,
+                 "count_unrecoverable_versions": 0,
+                 "servers_responding": [],
+                 "sharemap": {"shareid1": [sb.get_stub_server(serverid_1),
+                                           sb.get_stub_server(serverid_f)]},
+                 "count_wrong_shares": 0,
+                 "list_corrupt_shares": [],
+                 "count_corrupt_shares": 0,
+                 "list_incompatible_shares": [],
+                 "count_incompatible_shares": 0,
+                 "report": [], "share_problems": [], "servermap": None,
                  }
-        pre_cr.set_data(data)
-
-        post_cr = check_results.CheckResults(u, u.get_storage_index())
-        post_cr.set_healthy(True)
-        post_cr.set_recoverable(True)
-        post_cr.set_needs_rebalancing(False)
-        post_cr.set_summary("groovy")
-        data = { "count-shares-needed": 3,
-                 "count-shares-expected": 10,
-                 "count-shares-good": 10,
-                 "count-good-share-hosts": 11,
-                 "list-corrupt-shares": [],
-                 "count-wrong-shares": 0,
-                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
-                 "count-recoverable-versions": 1,
-                 "count-unrecoverable-versions": 0,
-                 "servers-responding": [],
+        pre_cr = check_results.CheckResults(u, u.get_storage_index(),
+                                            healthy=False, recoverable=True,
+                                            needs_rebalancing=False,
+                                            summary="illing",
+                                            **data)
+
+        data = { "count_shares_needed": 3,
+                 "count_shares_expected": 10,
+                 "count_shares_good": 10,
+                 "count_good_share_hosts": 11,
+                 "count_recoverable_versions": 1,
+                 "count_unrecoverable_versions": 0,
+                 "servers_responding": [],
+                 "sharemap": {"shareid1": [sb.get_stub_server(serverid_1),
+                                           sb.get_stub_server(serverid_f)]},
+                 "count_wrong_shares": 0,
+                 "count_corrupt_shares": 0,
+                 "list_corrupt_shares": [],
+                 "list_incompatible_shares": [],
+                 "count_incompatible_shares": 0,
+                 "report": [], "share_problems": [], "servermap": None,
                  }
-        post_cr.set_data(data)
+        post_cr = check_results.CheckResults(u, u.get_storage_index(),
+                                             healthy=True, recoverable=True,
+                                             needs_rebalancing=False,
+ summary="groovy", + **data) crr = check_results.CheckAndRepairResults(u.get_storage_index()) crr.pre_repair_results = pre_cr @@ -235,8 +263,12 @@ crr.repair_attempted = True crr.repair_successful = False - post_cr.set_healthy(False) - post_cr.set_summary("better") + post_cr = check_results.CheckResults(u, u.get_storage_index(), + healthy=False, recoverable=True, + needs_rebalancing=False, + summary="better", + **data) + crr.post_repair_results = post_cr html = self.render2(w) s = self.remove_tags(html) @@ -247,9 +279,12 @@ crr.repair_attempted = True crr.repair_successful = False - post_cr.set_healthy(False) - post_cr.set_recoverable(False) - post_cr.set_summary("worse") + post_cr = check_results.CheckResults(u, u.get_storage_index(), + healthy=False, recoverable=False, + needs_rebalancing=False, + summary="worse", + **data) + crr.post_repair_results = post_cr html = self.render2(w) s = self.remove_tags(html) @@ -339,7 +374,7 @@ DATA = "data" * 100 d = c0.upload(Data(DATA, convergence="")) def _stash_immutable(ur): - self.imm = c0.create_node_from_uri(ur.uri) + self.imm = c0.create_node_from_uri(ur.get_uri()) self.uri = self.imm.get_uri() d.addCallback(_stash_immutable) d.addCallback(lambda ign: @@ -358,10 +393,10 @@ def _check_and_repair(_): return self.imm.check_and_repair(Monitor()) def _check_counts(crr, shares_good, good_share_hosts): - p_crr = crr.get_post_repair_results().data + prr = crr.get_post_repair_results() #print self._pretty_shares_chart(self.uri) - self.failUnlessEqual(p_crr['count-shares-good'], shares_good) - self.failUnlessEqual(p_crr['count-good-share-hosts'], + self.failUnlessEqual(prr.get_share_counter_good(), shares_good) + self.failUnlessEqual(prr.get_host_counter_good_shares(), good_share_hosts) """ @@ -396,7 +431,7 @@ DATA = "data" * 100 d = c0.upload(Data(DATA, convergence="")) def _stash_immutable(ur): - self.imm = c0.create_node_from_uri(ur.uri) + self.imm = c0.create_node_from_uri(ur.get_uri()) d.addCallback(_stash_immutable) d.addCallback(lambda ign: c0.create_mutable_file(MutableData("contents"))) @@ -489,7 +524,7 @@ return self.c0.upload(Data(DATA, convergence="")) d.addCallback(_start) def _do_check(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) return n.check(Monitor(), verify=True) d.addCallback(_do_check) def _check(cr): diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_cli.py tahoe-lafs-1.10.0/src/allmydata/test/test_cli.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_cli.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_cli.py 2013-09-03 15:38:27.000000000 +0000 @@ -2,17 +2,20 @@ import os.path from twisted.trial import unittest from cStringIO import StringIO -import urllib, re +import urllib, re, sys import simplejson -from mock import patch +from mock import patch, Mock, call -from allmydata.util import fileutil, hashutil, base32 +from allmydata.util import fileutil, hashutil, base32, keyutil from allmydata import uri from allmydata.immutable import upload from allmydata.interfaces import MDMF_VERSION, SDMF_VERSION from allmydata.mutable.publish import MutableData from allmydata.dirnode import normalize +from allmydata.scripts.common_http import socket_error +import allmydata.scripts.common_http +from pycryptopp.publickey import ed25519 # Test that the scripts can be imported. 
 from allmydata.scripts import create_node, debug, keygen, startstop_node, \
@@ -41,13 +44,19 @@

 timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s

+def parse_options(basedir, command, args):
+    o = runner.Options()
+    o.parseOptions(["--node-directory", basedir, command] + args)
+    while hasattr(o, "subOptions"):
+        o = o.subOptions
+    return o

 class CLITestMixin(ReallyEqualMixin):
     def do_cli(self, verb, *args, **kwargs):
         nodeargs = [
             "--node-directory", self.get_clientdir(),
             ]
-        argv = [verb] + nodeargs + list(args)
+        argv = nodeargs + [verb] + list(args)
         stdin = kwargs.get("stdin", "")
         stdout, stderr = StringIO(), StringIO()
         d = threads.deferToThread(runner.runner, argv, run_by_human=False,
@@ -70,69 +79,6 @@

 class CLI(CLITestMixin, unittest.TestCase):
-    # this test case only looks at argument-processing and simple stuff.
-    def test_options(self):
-        fileutil.rm_dir("cli/test_options")
-        fileutil.make_dirs("cli/test_options")
-        fileutil.make_dirs("cli/test_options/private")
-        fileutil.write("cli/test_options/node.url", "http://localhost:8080/\n")
-        filenode_uri = uri.WriteableSSKFileURI(writekey="\x00"*16,
-                                               fingerprint="\x00"*32)
-        private_uri = uri.DirectoryURI(filenode_uri).to_string()
-        fileutil.write("cli/test_options/private/root_dir.cap", private_uri + "\n")
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options"])
-        self.failUnlessReallyEqual(o['node-url'], "http://localhost:8080/")
-        self.failUnlessReallyEqual(o.aliases[DEFAULT_ALIAS], private_uri)
-        self.failUnlessReallyEqual(o.where, u"")
-
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--node-url", "http://example.org:8111/"])
-        self.failUnlessReallyEqual(o['node-url'], "http://example.org:8111/")
-        self.failUnlessReallyEqual(o.aliases[DEFAULT_ALIAS], private_uri)
-        self.failUnlessReallyEqual(o.where, u"")
-
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--dir-cap", "root"])
-        self.failUnlessReallyEqual(o['node-url'], "http://localhost:8080/")
-        self.failUnlessReallyEqual(o.aliases[DEFAULT_ALIAS], "root")
-        self.failUnlessReallyEqual(o.where, u"")
-
-        o = cli.ListOptions()
-        other_filenode_uri = uri.WriteableSSKFileURI(writekey="\x11"*16,
-                                                     fingerprint="\x11"*32)
-        other_uri = uri.DirectoryURI(other_filenode_uri).to_string()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--dir-cap", other_uri])
-        self.failUnlessReallyEqual(o['node-url'], "http://localhost:8080/")
-        self.failUnlessReallyEqual(o.aliases[DEFAULT_ALIAS], other_uri)
-        self.failUnlessReallyEqual(o.where, u"")
-
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--dir-cap", other_uri, "subdir"])
-        self.failUnlessReallyEqual(o['node-url'], "http://localhost:8080/")
-        self.failUnlessReallyEqual(o.aliases[DEFAULT_ALIAS], other_uri)
-        self.failUnlessReallyEqual(o.where, u"subdir")
-
-        o = cli.ListOptions()
-        self.failUnlessRaises(usage.UsageError,
-                              o.parseOptions,
-                              ["--node-directory", "cli/test_options",
-                               "--node-url", "NOT-A-URL"])
-
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--node-url", "http://localhost:8080"])
-        self.failUnlessReallyEqual(o["node-url"], "http://localhost:8080/")
-
-        o = cli.ListOptions()
-        o.parseOptions(["--node-directory", "cli/test_options",
-                        "--node-url", "https://localhost/"])
-        self.failUnlessReallyEqual(o["node-url"], "https://localhost/")
-
     def _dump_cap(self, *args):
         config = debug.DumpCapOptions()
         config.stdout,config.stderr = StringIO(), StringIO()
@@ -451,26 +397,31 @@
                             "didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err)

     def test_alias(self):
-        aliases = {"tahoe": "TA",
-                   "work": "WA",
-                   "c": "CA"}
+        def s128(c): return base32.b2a(c*(128/8))
+        def s256(c): return base32.b2a(c*(256/8))
+        TA = "URI:DIR2:%s:%s" % (s128("T"), s256("T"))
+        WA = "URI:DIR2:%s:%s" % (s128("W"), s256("W"))
+        CA = "URI:DIR2:%s:%s" % (s128("C"), s256("C"))
+        aliases = {"tahoe": TA,
+                   "work": WA,
+                   "c": CA}
         def ga1(path):
             return get_alias(aliases, path, u"tahoe")
         uses_lettercolon = common.platform_uses_lettercolon_drivename()
-        self.failUnlessReallyEqual(ga1(u"bare"), ("TA", "bare"))
-        self.failUnlessReallyEqual(ga1(u"baredir/file"), ("TA", "baredir/file"))
-        self.failUnlessReallyEqual(ga1(u"baredir/file:7"), ("TA", "baredir/file:7"))
-        self.failUnlessReallyEqual(ga1(u"tahoe:"), ("TA", ""))
-        self.failUnlessReallyEqual(ga1(u"tahoe:file"), ("TA", "file"))
-        self.failUnlessReallyEqual(ga1(u"tahoe:dir/file"), ("TA", "dir/file"))
-        self.failUnlessReallyEqual(ga1(u"work:"), ("WA", ""))
-        self.failUnlessReallyEqual(ga1(u"work:file"), ("WA", "file"))
-        self.failUnlessReallyEqual(ga1(u"work:dir/file"), ("WA", "dir/file"))
+        self.failUnlessReallyEqual(ga1(u"bare"), (TA, "bare"))
+        self.failUnlessReallyEqual(ga1(u"baredir/file"), (TA, "baredir/file"))
+        self.failUnlessReallyEqual(ga1(u"baredir/file:7"), (TA, "baredir/file:7"))
+        self.failUnlessReallyEqual(ga1(u"tahoe:"), (TA, ""))
+        self.failUnlessReallyEqual(ga1(u"tahoe:file"), (TA, "file"))
+        self.failUnlessReallyEqual(ga1(u"tahoe:dir/file"), (TA, "dir/file"))
+        self.failUnlessReallyEqual(ga1(u"work:"), (WA, ""))
+        self.failUnlessReallyEqual(ga1(u"work:file"), (WA, "file"))
+        self.failUnlessReallyEqual(ga1(u"work:dir/file"), (WA, "dir/file"))
         # default != None means we really expect a tahoe path, regardless of
         # whether we're on windows or not. This is what 'tahoe get' uses.
-        self.failUnlessReallyEqual(ga1(u"c:"), ("CA", ""))
-        self.failUnlessReallyEqual(ga1(u"c:file"), ("CA", "file"))
-        self.failUnlessReallyEqual(ga1(u"c:dir/file"), ("CA", "dir/file"))
+        self.failUnlessReallyEqual(ga1(u"c:"), (CA, ""))
+        self.failUnlessReallyEqual(ga1(u"c:file"), (CA, "file"))
+        self.failUnlessReallyEqual(ga1(u"c:dir/file"), (CA, "dir/file"))
         self.failUnlessReallyEqual(ga1(u"URI:stuff"), ("URI:stuff", ""))
         self.failUnlessReallyEqual(ga1(u"URI:stuff/file"), ("URI:stuff", "file"))
         self.failUnlessReallyEqual(ga1(u"URI:stuff:./file"), ("URI:stuff", "file"))
@@ -489,9 +440,9 @@
                                    (DefaultAliasMarker, "baredir/file:7"))
         self.failUnlessReallyEqual(ga2(u"baredir/sub:1/file:7"),
                                    (DefaultAliasMarker, "baredir/sub:1/file:7"))
-        self.failUnlessReallyEqual(ga2(u"tahoe:"), ("TA", ""))
-        self.failUnlessReallyEqual(ga2(u"tahoe:file"), ("TA", "file"))
-        self.failUnlessReallyEqual(ga2(u"tahoe:dir/file"), ("TA", "dir/file"))
+        self.failUnlessReallyEqual(ga2(u"tahoe:"), (TA, ""))
+        self.failUnlessReallyEqual(ga2(u"tahoe:file"), (TA, "file"))
+        self.failUnlessReallyEqual(ga2(u"tahoe:dir/file"), (TA, "dir/file"))
         # on windows, we really want c:foo to indicate a local file.
         # default==None is what 'tahoe cp' uses.
         if uses_lettercolon:
@@ -500,12 +451,12 @@
             self.failUnlessReallyEqual(ga2(u"c:"), (DefaultAliasMarker, "c:"))
             self.failUnlessReallyEqual(ga2(u"c:file"), (DefaultAliasMarker, "c:file"))
             self.failUnlessReallyEqual(ga2(u"c:dir/file"),
                                        (DefaultAliasMarker, "c:dir/file"))
         else:
-            self.failUnlessReallyEqual(ga2(u"c:"), ("CA", ""))
-            self.failUnlessReallyEqual(ga2(u"c:file"), ("CA", "file"))
-            self.failUnlessReallyEqual(ga2(u"c:dir/file"), ("CA", "dir/file"))
-        self.failUnlessReallyEqual(ga2(u"work:"), ("WA", ""))
-        self.failUnlessReallyEqual(ga2(u"work:file"), ("WA", "file"))
-        self.failUnlessReallyEqual(ga2(u"work:dir/file"), ("WA", "dir/file"))
+            self.failUnlessReallyEqual(ga2(u"c:"), (CA, ""))
+            self.failUnlessReallyEqual(ga2(u"c:file"), (CA, "file"))
+            self.failUnlessReallyEqual(ga2(u"c:dir/file"), (CA, "dir/file"))
+        self.failUnlessReallyEqual(ga2(u"work:"), (WA, ""))
+        self.failUnlessReallyEqual(ga2(u"work:file"), (WA, "file"))
+        self.failUnlessReallyEqual(ga2(u"work:dir/file"), (WA, "dir/file"))
         self.failUnlessReallyEqual(ga2(u"URI:stuff"), ("URI:stuff", ""))
         self.failUnlessReallyEqual(ga2(u"URI:stuff/file"), ("URI:stuff", "file"))
         self.failUnlessReallyEqual(ga2(u"URI:stuff:./file"), ("URI:stuff", "file"))
@@ -530,16 +481,16 @@
                                    (DefaultAliasMarker, "baredir/file:7"))
         self.failUnlessReallyEqual(ga3(u"baredir/sub:1/file:7"),
                                    (DefaultAliasMarker, "baredir/sub:1/file:7"))
-        self.failUnlessReallyEqual(ga3(u"tahoe:"), ("TA", ""))
-        self.failUnlessReallyEqual(ga3(u"tahoe:file"), ("TA", "file"))
-        self.failUnlessReallyEqual(ga3(u"tahoe:dir/file"), ("TA", "dir/file"))
+        self.failUnlessReallyEqual(ga3(u"tahoe:"), (TA, ""))
+        self.failUnlessReallyEqual(ga3(u"tahoe:file"), (TA, "file"))
+        self.failUnlessReallyEqual(ga3(u"tahoe:dir/file"), (TA, "dir/file"))
         self.failUnlessReallyEqual(ga3(u"c:"), (DefaultAliasMarker, "c:"))
         self.failUnlessReallyEqual(ga3(u"c:file"), (DefaultAliasMarker, "c:file"))
         self.failUnlessReallyEqual(ga3(u"c:dir/file"),
                                    (DefaultAliasMarker, "c:dir/file"))
-        self.failUnlessReallyEqual(ga3(u"work:"), ("WA", ""))
-        self.failUnlessReallyEqual(ga3(u"work:file"), ("WA", "file"))
-        self.failUnlessReallyEqual(ga3(u"work:dir/file"), ("WA", "dir/file"))
+        self.failUnlessReallyEqual(ga3(u"work:"), (WA, ""))
+        self.failUnlessReallyEqual(ga3(u"work:file"), (WA, "file"))
+        self.failUnlessReallyEqual(ga3(u"work:dir/file"), (WA, "dir/file"))
         self.failUnlessReallyEqual(ga3(u"URI:stuff"), ("URI:stuff", ""))
         self.failUnlessReallyEqual(ga3(u"URI:stuff:./file"), ("URI:stuff", "file"))
         self.failUnlessReallyEqual(ga3(u"URI:stuff:./dir/file"), ("URI:stuff", "dir/file"))
@@ -564,6 +515,19 @@
             return retval
         self.failUnlessRaises(common.UnknownAliasError, ga5, u"C:\\Windows")

+    def test_alias_tolerance(self):
+        def s128(c): return base32.b2a(c*(128/8))
+        def s256(c): return base32.b2a(c*(256/8))
+        TA = "URI:DIR2:%s:%s" % (s128("T"), s256("T"))
+        aliases = {"present": TA,
+                   "future": "URI-FROM-FUTURE:ooh:aah"}
+        def ga1(path):
+            return get_alias(aliases, path, u"tahoe")
+        self.failUnlessReallyEqual(ga1(u"present:file"), (TA, "file"))
+        # this throws, via assert IDirnodeURI.providedBy(), since get_alias()
+        # wants a dirnode, and the future cap gives us UnknownURI instead.
+        self.failUnlessRaises(AssertionError, ga1, u"future:stuff")
+
     def test_listdir_unicode_good(self):
         filenames = [u'L\u00F4zane', u'Bern', u'Gen\u00E8ve'] # must be NFC
@@ -579,124 +543,159 @@
         for file in listdir_unicode(unicode(basedir)):
             self.failUnlessIn(normalize(file), filenames)

+    def test_exception_catcher(self):
+        self.basedir = "cli/exception_catcher"
+
+        runner_mock = Mock()
+        sys_exit_mock = Mock()
+        stderr = StringIO()
+        self.patch(sys, "argv", ["tahoe"])
+        self.patch(runner, "runner", runner_mock)
+        self.patch(sys, "exit", sys_exit_mock)
+        self.patch(sys, "stderr", stderr)
+        exc = Exception("canary")
+
+        def call_runner(args, install_node_control=True):
+            raise exc
+        runner_mock.side_effect = call_runner
+
+        runner.run()
+        self.failUnlessEqual(runner_mock.call_args_list, [call([], install_node_control=True)])
+        self.failUnlessEqual(sys_exit_mock.call_args_list, [call(1)])
+        self.failUnlessIn(str(exc), stderr.getvalue())
+

 class Help(unittest.TestCase):
     def test_get(self):
         help = str(cli.GetOptions())
-        self.failUnlessIn(" get [options] REMOTE_FILE LOCAL_FILE", help)
+        self.failUnlessIn(" [global-opts] get [options] REMOTE_FILE LOCAL_FILE", help)
         self.failUnlessIn("% tahoe get FOO |less", help)

     def test_put(self):
         help = str(cli.PutOptions())
-        self.failUnlessIn(" put [options] LOCAL_FILE REMOTE_FILE", help)
+        self.failUnlessIn(" [global-opts] put [options] LOCAL_FILE REMOTE_FILE", help)
         self.failUnlessIn("% cat FILE | tahoe put", help)

+    def test_ls(self):
+        help = str(cli.ListOptions())
+        self.failUnlessIn(" [global-opts] ls [options] [PATH]", help)
+
     def test_unlink(self):
         help = str(cli.UnlinkOptions())
-        self.failUnlessIn(" unlink [options] REMOTE_FILE", help)
+        self.failUnlessIn(" [global-opts] unlink [options] REMOTE_FILE", help)

     def test_rm(self):
         help = str(cli.RmOptions())
-        self.failUnlessIn(" rm [options] REMOTE_FILE", help)
+        self.failUnlessIn(" [global-opts] rm [options] REMOTE_FILE", help)

     def test_mv(self):
         help = str(cli.MvOptions())
-        self.failUnlessIn(" mv [options] FROM TO", help)
+        self.failUnlessIn(" [global-opts] mv [options] FROM TO", help)
         self.failUnlessIn("Use 'tahoe mv' to move files", help)

     def test_cp(self):
         help = str(cli.CpOptions())
-        self.failUnlessIn(" cp [options] FROM.. TO", help)
+        self.failUnlessIn(" [global-opts] cp [options] FROM.. TO", help)
         self.failUnlessIn("Use 'tahoe cp' to copy files", help)

     def test_ln(self):
         help = str(cli.LnOptions())
-        self.failUnlessIn(" ln [options] FROM_LINK TO_LINK", help)
+        self.failUnlessIn(" [global-opts] ln [options] FROM_LINK TO_LINK", help)
         self.failUnlessIn("Use 'tahoe ln' to duplicate a link", help)

     def test_mkdir(self):
         help = str(cli.MakeDirectoryOptions())
-        self.failUnlessIn(" mkdir [options] [REMOTE_DIR]", help)
+        self.failUnlessIn(" [global-opts] mkdir [options] [REMOTE_DIR]", help)
         self.failUnlessIn("Create a new directory", help)

     def test_backup(self):
         help = str(cli.BackupOptions())
-        self.failUnlessIn(" backup [options] FROM ALIAS:TO", help)
+        self.failUnlessIn(" [global-opts] backup [options] FROM ALIAS:TO", help)

     def test_webopen(self):
         help = str(cli.WebopenOptions())
-        self.failUnlessIn(" webopen [options] [ALIAS:PATH]", help)
+        self.failUnlessIn(" [global-opts] webopen [options] [ALIAS:PATH]", help)

     def test_manifest(self):
         help = str(cli.ManifestOptions())
-        self.failUnlessIn(" manifest [options] [ALIAS:PATH]", help)
+        self.failUnlessIn(" [global-opts] manifest [options] [ALIAS:PATH]", help)

     def test_stats(self):
         help = str(cli.StatsOptions())
-        self.failUnlessIn(" stats [options] [ALIAS:PATH]", help)
+        self.failUnlessIn(" [global-opts] stats [options] [ALIAS:PATH]", help)

     def test_check(self):
         help = str(cli.CheckOptions())
-        self.failUnlessIn(" check [options] [ALIAS:PATH]", help)
+        self.failUnlessIn(" [global-opts] check [options] [ALIAS:PATH]", help)

     def test_deep_check(self):
         help = str(cli.DeepCheckOptions())
-        self.failUnlessIn(" deep-check [options] [ALIAS:PATH]", help)
+        self.failUnlessIn(" [global-opts] deep-check [options] [ALIAS:PATH]", help)

     def test_create_alias(self):
         help = str(cli.CreateAliasOptions())
-        self.failUnlessIn(" create-alias [options] ALIAS[:]", help)
+        self.failUnlessIn(" [global-opts] create-alias [options] ALIAS[:]", help)

     def test_add_alias(self):
         help = str(cli.AddAliasOptions())
-        self.failUnlessIn(" add-alias [options] ALIAS[:] DIRCAP", help)
+        self.failUnlessIn(" [global-opts] add-alias [options] ALIAS[:] DIRCAP", help)

     def test_list_aliases(self):
         help = str(cli.ListAliasesOptions())
-        self.failUnlessIn(" list-aliases [options]", help)
+        self.failUnlessIn(" [global-opts] list-aliases [options]", help)

     def test_start(self):
         help = str(startstop_node.StartOptions())
-        self.failUnlessIn(" start [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] start [options] [NODEDIR]", help)

     def test_stop(self):
         help = str(startstop_node.StopOptions())
-        self.failUnlessIn(" stop [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] stop [options] [NODEDIR]", help)

     def test_restart(self):
         help = str(startstop_node.RestartOptions())
-        self.failUnlessIn(" restart [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] restart [options] [NODEDIR]", help)

     def test_run(self):
         help = str(startstop_node.RunOptions())
-        self.failUnlessIn(" run [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] run [options] [NODEDIR]", help)

     def test_create_client(self):
         help = str(create_node.CreateClientOptions())
-        self.failUnlessIn(" create-client [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] create-client [options] [NODEDIR]", help)

     def test_create_node(self):
         help = str(create_node.CreateNodeOptions())
-        self.failUnlessIn(" create-node [options] [NODEDIR]", help)
+        self.failUnlessIn(" [global-opts] create-node [options] [NODEDIR]", help)

     def test_create_introducer(self):
         help = str(create_node.CreateIntroducerOptions())
-        self.failUnlessIn(" create-introducer [options] NODEDIR", help)
+        self.failUnlessIn(" [global-opts] create-introducer [options] NODEDIR", help)

     def test_debug_trial(self):
         help = str(debug.TrialOptions())
-        self.failUnlessIn(" debug trial [options] [[file|package|module|TestCase|testmethod]...]", help)
+        self.failUnlessIn(" [global-opts] debug trial [options] [[file|package|module|TestCase|testmethod]...]", help)
         self.failUnlessIn("The 'tahoe debug trial' command uses the correct imports", help)

+    def test_debug_flogtool(self):
+        options = debug.FlogtoolOptions()
+        help = str(options)
+        self.failUnlessIn(" [global-opts] debug flogtool ", help)
+        self.failUnlessIn("The 'tahoe debug flogtool' command uses the correct imports", help)
+
+        for (option, shortcut, oClass, desc) in options.subCommands:
+            subhelp = str(oClass())
+            self.failUnlessIn(" [global-opts] debug flogtool %s " % (option,), subhelp)
+

 class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):

     def _test_webopen(self, args, expected_url):
-        woo = cli.WebopenOptions()
-        all_args = ["--node-directory", self.get_clientdir()] + list(args)
-        woo.parseOptions(all_args)
+        o = runner.Options()
+        o.parseOptions(["--node-directory", self.get_clientdir(), "webopen"]
+                       + list(args))
         urls = []
-        rc = cli.webopen(woo, urls.append)
+        rc = cli.webopen(o, urls.append)
         self.failUnlessReallyEqual(rc, 0)
         self.failUnlessReallyEqual(len(urls), 1)
         self.failUnlessReallyEqual(urls[0], expected_url)
@@ -1135,8 +1134,20 @@
         d = self.do_cli("create-alias", "tahoe")
         d.addCallback(lambda res:
                       self.do_cli("put", "--mutable", fn1, "tahoe:uploaded.txt"))
+        def _check(res):
+            (rc, out, err) = res
+            self.failUnlessEqual(rc, 0, str(res))
+            self.failUnlessEqual(err.strip(), "201 Created", str(res))
+            self.uri = out
+        d.addCallback(_check)
         d.addCallback(lambda res:
                       self.do_cli("put", fn2, "tahoe:uploaded.txt"))
+        def _check2(res):
+            (rc, out, err) = res
+            self.failUnlessEqual(rc, 0, str(res))
+            self.failUnlessEqual(err.strip(), "200 OK", str(res))
+            self.failUnlessEqual(out, self.uri, str(res))
+        d.addCallback(_check2)
         d.addCallback(lambda res:
                       self.do_cli("get", "tahoe:uploaded.txt"))
         d.addCallback(lambda (rc,out,err):
                       self.failUnlessReallyEqual(out, DATA2))
@@ -1366,6 +1377,55 @@
         return d

+
+class Admin(unittest.TestCase):
+    def do_cli(self, *args, **kwargs):
+        argv = list(args)
+        stdin = kwargs.get("stdin", "")
+        stdout, stderr = StringIO(), StringIO()
+        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
+                                  stdin=StringIO(stdin),
+                                  stdout=stdout, stderr=stderr)
+        def _done(res):
+            return stdout.getvalue(), stderr.getvalue()
+        d.addCallback(_done)
+        return d
+
+    def test_generate_keypair(self):
+        d = self.do_cli("admin", "generate-keypair")
+        def _done( (stdout, stderr) ):
+            lines = [line.strip() for line in stdout.splitlines()]
+            privkey_bits = lines[0].split()
+            pubkey_bits = lines[1].split()
+            sk_header = "private:"
+            vk_header = "public:"
+            self.failUnlessEqual(privkey_bits[0], sk_header, lines[0])
+            self.failUnlessEqual(pubkey_bits[0], vk_header, lines[1])
+            self.failUnless(privkey_bits[1].startswith("priv-v0-"), lines[0])
+            self.failUnless(pubkey_bits[1].startswith("pub-v0-"), lines[1])
+            sk_bytes = base32.a2b(keyutil.remove_prefix(privkey_bits[1], "priv-v0-"))
+            sk = ed25519.SigningKey(sk_bytes)
+            vk_bytes = base32.a2b(keyutil.remove_prefix(pubkey_bits[1], "pub-v0-"))
+            self.failUnlessEqual(sk.get_verifying_key_bytes(), vk_bytes)
+        d.addCallback(_done)
+        return d
+
+    def test_derive_pubkey(self):
+        priv1,pub1 = keyutil.make_keypair()
+        d = self.do_cli("admin", "derive-pubkey", priv1)
+        def _done( (stdout, stderr) ):
+            lines = stdout.split("\n")
+            privkey_line = lines[0].strip()
+            pubkey_line = lines[1].strip()
+            sk_header = "private: priv-v0-"
+            vk_header = "public: pub-v0-"
+            self.failUnless(privkey_line.startswith(sk_header), privkey_line)
+            self.failUnless(pubkey_line.startswith(vk_header), pubkey_line)
+            pub2 = pubkey_line[len(vk_header):]
+            self.failUnlessEqual("pub-v0-"+pub2, pub1)
+        d.addCallback(_done)
+        return d
+
+
 class List(GridTestMixin, CLITestMixin, unittest.TestCase):
     def test_list(self):
         self.basedir = "cli/List/list"
@@ -1969,6 +2029,12 @@
             results = fileutil.read(fn3)
             self.failUnlessReallyEqual(results, DATA1)
         d.addCallback(_get_resp2)
+        # cp --verbose filename3 dircap:test_file
+        d.addCallback(lambda ign:
+                      self.do_cli("cp", "--verbose", '--recursive',
+                                  self.basedir, self.dircap))
+        def _test_for_wrong_indices((rc, out, err)):
+            self.failUnless('examining 1 of 1\n' in err)
+        d.addCallback(_test_for_wrong_indices)
         return d

     def test_cp_with_nonexistent_alias(self):
@@ -2375,6 +2441,34 @@
         d.addCallback(_got_testdir_json)
         return d

+    def test_cp_verbose(self):
+        self.basedir = "cli/Cp/cp_verbose"
+        self.set_up_grid()
+
+        # Write two test files, which we'll copy to the grid.
+        test1_path = os.path.join(self.basedir, "test1")
+        test2_path = os.path.join(self.basedir, "test2")
+        fileutil.write(test1_path, "test1")
+        fileutil.write(test2_path, "test2")
+
+        d = self.do_cli("create-alias", "tahoe")
+        d.addCallback(lambda ign:
+                      self.do_cli("cp", "--verbose", test1_path, test2_path, "tahoe:"))
+        def _check(res):
+            (rc, out, err) = res
+            self.failUnlessEqual(rc, 0, str(res))
+            self.failUnlessIn("Success: files copied", out, str(res))
+            self.failUnlessEqual(err, """\
+attaching sources to targets, 2 files / 0 dirs in root
+targets assigned, 1 dirs, 2 files
+starting copy, 2 files, 1 directories
+1/2 files, 0/1 directories
+2/2 files, 0/1 directories
+1/1 directories
+""", str(res))
+        d.addCallback(_check)
+        return d
+

 class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):

@@ -2401,8 +2495,9 @@
         # is the backupdb available? If so, we test that a second backup does
         # not create new directories.
         hush = StringIO()
-        have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
-                                         hush)
+        bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
+                                    hush)
+        self.failUnless(bdb)

         # create a small local directory with a couple of files
         source = os.path.join(self.basedir, "home")
@@ -2421,13 +2516,6 @@

         d = self.do_cli("create-alias", "tahoe")

-        if not have_bdb:
-            d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups"))
-            def _should_complain((rc, out, err)):
-                self.failUnless("I was unable to import a python sqlite library" in err, err)
-            d.addCallback(_should_complain)
-            d.addCallback(self.stall, 1.1) # make sure the backups get distinct timestamps
-
         d.addCallback(lambda res: do_backup())
         def _check0((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
@@ -2488,61 +2576,56 @@
             # available
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
-            if have_bdb:
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                # foo.txt, bar.txt, blah.txt
-                self.failUnlessReallyEqual(fu, 0)
-                self.failUnlessReallyEqual(fr, 3)
-                self.failUnlessReallyEqual(fs, 0)
-                # empty, home, home/parent, home/parent/subdir
-                self.failUnlessReallyEqual(dc, 0)
-                self.failUnlessReallyEqual(dr, 4)
-                self.failUnlessReallyEqual(ds, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            # foo.txt, bar.txt, blah.txt
+            self.failUnlessReallyEqual(fu, 0)
+            self.failUnlessReallyEqual(fr, 3)
+            self.failUnlessReallyEqual(fs, 0)
+            # empty, home, home/parent, home/parent/subdir
+            self.failUnlessReallyEqual(dc, 0)
+            self.failUnlessReallyEqual(dr, 4)
+            self.failUnlessReallyEqual(ds, 0)
         d.addCallback(_check4a)

-        if have_bdb:
-            # sneak into the backupdb, crank back the "last checked"
-            # timestamp to force a check on all files
-            def _reset_last_checked(res):
-                dbfile = os.path.join(self.get_clientdir(),
-                                      "private", "backupdb.sqlite")
-                self.failUnless(os.path.exists(dbfile), dbfile)
-                bdb = backupdb.get_backupdb(dbfile)
-                bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
-                bdb.cursor.execute("UPDATE directories SET last_checked=0")
-                bdb.connection.commit()
-
-            d.addCallback(_reset_last_checked)
-
-            d.addCallback(self.stall, 1.1)
-            d.addCallback(lambda res: do_backup(verbose=True))
-            def _check4b((rc, out, err)):
-                # we should check all files, and re-use all of them. None of
-                # the directories should have been changed, so we should
-                # re-use all of them too.
-                self.failUnlessReallyEqual(err, "")
-                self.failUnlessReallyEqual(rc, 0)
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                fchecked, dchecked = self.count_output2(out)
-                self.failUnlessReallyEqual(fchecked, 3)
-                self.failUnlessReallyEqual(fu, 0)
-                self.failUnlessReallyEqual(fr, 3)
-                self.failUnlessReallyEqual(fs, 0)
-                self.failUnlessReallyEqual(dchecked, 4)
-                self.failUnlessReallyEqual(dc, 0)
-                self.failUnlessReallyEqual(dr, 4)
-                self.failUnlessReallyEqual(ds, 0)
-            d.addCallback(_check4b)
+        # sneak into the backupdb, crank back the "last checked"
+        # timestamp to force a check on all files
+        def _reset_last_checked(res):
+            dbfile = os.path.join(self.get_clientdir(),
+                                  "private", "backupdb.sqlite")
+            self.failUnless(os.path.exists(dbfile), dbfile)
+            bdb = backupdb.get_backupdb(dbfile)
+            bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
+            bdb.cursor.execute("UPDATE directories SET last_checked=0")
+            bdb.connection.commit()
+
+        d.addCallback(_reset_last_checked)
+
+        d.addCallback(self.stall, 1.1)
+        d.addCallback(lambda res: do_backup(verbose=True))
+        def _check4b((rc, out, err)):
+            # we should check all files, and re-use all of them. None of
+            # the directories should have been changed, so we should
+            # re-use all of them too.
+            self.failUnlessReallyEqual(err, "")
+            self.failUnlessReallyEqual(rc, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            fchecked, dchecked = self.count_output2(out)
+            self.failUnlessReallyEqual(fchecked, 3)
+            self.failUnlessReallyEqual(fu, 0)
+            self.failUnlessReallyEqual(fr, 3)
+            self.failUnlessReallyEqual(fs, 0)
+            self.failUnlessReallyEqual(dchecked, 4)
+            self.failUnlessReallyEqual(dc, 0)
+            self.failUnlessReallyEqual(dr, 4)
+            self.failUnlessReallyEqual(ds, 0)
+        d.addCallback(_check4b)

         d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
         def _check5((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
             self.new_archives = out.split()
-            expected_new = 2
-            if have_bdb:
-                expected_new += 1
-            self.failUnlessReallyEqual(len(self.new_archives), expected_new, out)
+            self.failUnlessReallyEqual(len(self.new_archives), 3, out)
             # the original backup should still be the oldest (i.e. sorts
             # alphabetically towards the beginning)
             self.failUnlessReallyEqual(sorted(self.new_archives)[0],
@@ -2567,27 +2650,23 @@
             # and upload the rest. None of the directories can be reused.
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
-            if have_bdb:
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                # new foo.txt, surprise file, subfile, empty
-                self.failUnlessReallyEqual(fu, 4)
-                # old bar.txt
-                self.failUnlessReallyEqual(fr, 1)
-                self.failUnlessReallyEqual(fs, 0)
-                # home, parent, subdir, blah.txt, surprisedir
-                self.failUnlessReallyEqual(dc, 5)
-                self.failUnlessReallyEqual(dr, 0)
-                self.failUnlessReallyEqual(ds, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            # new foo.txt, surprise file, subfile, empty
+            self.failUnlessReallyEqual(fu, 4)
+            # old bar.txt
+            self.failUnlessReallyEqual(fr, 1)
+            self.failUnlessReallyEqual(fs, 0)
+            # home, parent, subdir, blah.txt, surprisedir
+            self.failUnlessReallyEqual(dc, 5)
+            self.failUnlessReallyEqual(dr, 0)
+            self.failUnlessReallyEqual(ds, 0)
         d.addCallback(_check5a)
         d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
         def _check6((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
             self.new_archives = out.split()
-            expected_new = 3
-            if have_bdb:
-                expected_new += 1
-            self.failUnlessReallyEqual(len(self.new_archives), expected_new)
+            self.failUnlessReallyEqual(len(self.new_archives), 4)
             self.failUnlessReallyEqual(sorted(self.new_archives)[0],
                                        self.old_archives[0])
         d.addCallback(_check6)
@@ -2629,25 +2708,20 @@
         fileutil.make_dirs(basedir)
         nodeurl_path = os.path.join(basedir, 'node.url')
         fileutil.write(nodeurl_path, 'http://example.net:2357/')
+        def parse(args): return parse_options(basedir, "backup", args)

         # test simple exclude
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', '*lyx', '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', '*lyx', 'from', 'to'])
         filtered = list(backup_options.filter_listdir(root_listdir))
         self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'),
                               (u'nice_doc.lyx',))
         # multiple exclude
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', '*lyx', '--exclude', 'lib.?', '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', '*lyx', '--exclude', 'lib.?', 'from', 'to'])
         filtered = list(backup_options.filter_listdir(root_listdir))
         self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'),
                               (u'nice_doc.lyx', u'lib.a'))
         # vcs metadata exclusion
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude-vcs', '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude-vcs', 'from', 'to'])
         filtered = list(backup_options.filter_listdir(subdir_listdir))
         self._check_filtering(filtered, subdir_listdir, (u'another_doc.lyx', u'run_snake_run.py',),
                               (u'CVS', u'.svn', u'_darcs'))
@@ -2655,22 +2729,17 @@
         exclusion_string = "_darcs\n*py\n.svn"
         excl_filepath = os.path.join(basedir, 'exclusion')
         fileutil.write(excl_filepath, exclusion_string)
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude-from', excl_filepath, '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude-from', excl_filepath, 'from', 'to'])
         filtered = list(backup_options.filter_listdir(subdir_listdir))
         self._check_filtering(filtered, subdir_listdir, (u'another_doc.lyx', u'CVS'),
                               (u'.svn', u'_darcs', u'run_snake_run.py'))
         # test BackupConfigurationError
         self.failUnlessRaises(cli.BackupConfigurationError,
-                              backup_options.parseOptions,
-                              ['--exclude-from', excl_filepath + '.no', '--node-directory',
-                               basedir, 'from', 'to'])
+                              parse,
+                              ['--exclude-from', excl_filepath + '.no', 'from', 'to'])

         # test that an iterator works too
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', '*lyx', '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', '*lyx', 'from', 'to'])
         filtered = list(backup_options.filter_listdir(iter(root_listdir)))
         self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'),
                               (u'nice_doc.lyx',))
@@ -2687,18 +2756,15 @@
         fileutil.make_dirs(basedir)
         nodeurl_path = os.path.join(basedir, 'node.url')
         fileutil.write(nodeurl_path, 'http://example.net:2357/')
+        def parse(args): return parse_options(basedir, "backup", args)

         # test simple exclude
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', doc_pattern_arg, '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to'])
         filtered = list(backup_options.filter_listdir(root_listdir))
         self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'),
                               (nice_doc,))
         # multiple exclude
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', doc_pattern_arg, '--exclude', 'lib.?', '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', doc_pattern_arg, '--exclude', 'lib.?', 'from', 'to'])
         filtered = list(backup_options.filter_listdir(root_listdir))
         self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'),
                               (nice_doc, u'lib.a'))
@@ -2706,17 +2772,13 @@
         exclusion_string = doc_pattern_arg + "\nlib.?"
         excl_filepath = os.path.join(basedir, 'exclusion')
         fileutil.write(excl_filepath, exclusion_string)
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude-from', excl_filepath, '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude-from', excl_filepath, 'from', 'to'])
         filtered = list(backup_options.filter_listdir(root_listdir))
         self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'),
                               (nice_doc, u'lib.a'))
         # test that an iterator works too
-        backup_options = cli.BackupOptions()
-        backup_options.parseOptions(['--exclude', doc_pattern_arg, '--node-directory',
-                                     basedir, 'from', 'to'])
+        backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to'])
         filtered = list(backup_options.filter_listdir(iter(root_listdir)))
         self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'),
                               (nice_doc,))
@@ -2727,14 +2789,13 @@
         fileutil.make_dirs(basedir)
         nodeurl_path = os.path.join(basedir, 'node.url')
         fileutil.write(nodeurl_path, 'http://example.net:2357/')
+        def parse(args): return parse_options(basedir, "backup", args)

         # ensure that tilde expansion is performed on exclude-from argument
         exclude_file = u'~/.tahoe/excludes.dummy'
-        backup_options = cli.BackupOptions()
         mock.return_value = StringIO()
-        backup_options.parseOptions(['--exclude-from', unicode_to_argv(exclude_file),
-                                     '--node-directory', basedir, 'from', 'to'])
+        parse(['--exclude-from', unicode_to_argv(exclude_file), 'from', 'to'])
         self.failUnlessIn(((abspath_expanduser_unicode(exclude_file),), {}), mock.call_args_list)

     def test_ignore_symlinks(self):
@@ -2896,8 +2957,41 @@
             self.failUnlessReallyEqual(rc, 0)
             data = simplejson.loads(out)
             self.failUnlessReallyEqual(to_str(data["summary"]), "Healthy")
+            self.failUnlessReallyEqual(data["results"]["healthy"], True)
         d.addCallback(_check2)

+        d.addCallback(lambda ign: c0.upload(upload.Data("literal", convergence="")))
+        def _stash_lit_uri(n):
+            self.lit_uri = n.get_uri()
+        d.addCallback(_stash_lit_uri)
+
+        d.addCallback(lambda ign: self.do_cli("check", self.lit_uri))
+        def _check_lit((rc, out, err)):
+            self.failUnlessReallyEqual(err, "")
+            self.failUnlessReallyEqual(rc, 0)
+            lines = out.splitlines()
+            self.failUnless("Summary: Healthy (LIT)" in lines, out)
+        d.addCallback(_check_lit)
+
+        d.addCallback(lambda ign: self.do_cli("check", "--raw", self.lit_uri))
+        def _check_lit_raw((rc, out, err)):
+            self.failUnlessReallyEqual(err, "")
+            self.failUnlessReallyEqual(rc, 0)
+            data = simplejson.loads(out)
+            self.failUnlessReallyEqual(data["results"]["healthy"], True)
+        d.addCallback(_check_lit_raw)
+
+        d.addCallback(lambda ign: c0.create_immutable_dirnode({}, convergence=""))
+        def _stash_lit_dir_uri(n):
+            self.lit_dir_uri = n.get_uri()
+        d.addCallback(_stash_lit_dir_uri)
+
+        d.addCallback(lambda ign: self.do_cli("check", self.lit_dir_uri))
+        d.addCallback(_check_lit)
+
+        d.addCallback(lambda ign: self.do_cli("check", "--raw", self.lit_uri))
+        d.addCallback(_check_lit_raw)
+
         def _clobber_shares(ignored):
             # delete one, corrupt a second
             shares = self.find_uri_shares(self.uri)
@@ -2927,6 +3021,18 @@
             self.failUnless(self._corrupt_share_line in lines, out)
         d.addCallback(_check3)

+        d.addCallback(lambda ign: self.do_cli("check", "--verify", "--raw", self.uri))
+        def _check3_raw((rc, out, err)):
+            self.failUnlessReallyEqual(err, "")
+            self.failUnlessReallyEqual(rc, 0)
+            data = simplejson.loads(out)
+            self.failUnlessReallyEqual(data["results"]["healthy"], False)
+            self.failUnlessIn("Unhealthy: 8 shares (enc 3-of-10)", data["summary"])
+            self.failUnlessReallyEqual(data["results"]["count-shares-good"], 8)
+            self.failUnlessReallyEqual(data["results"]["count-corrupt-shares"], 1)
+            self.failUnlessIn("list-corrupt-shares", data["results"])
+        d.addCallback(_check3_raw)
+
         d.addCallback(lambda ign:
                       self.do_cli("check", "--verify", "--repair", self.uri))
         def _check4((rc, out, err)):
@@ -3202,8 +3308,8 @@
         DATA = "data" * 100
         d = c0.upload(upload.Data(DATA, convergence=""))
         def _stash_bad(ur):
-            self.uri_1share = ur.uri
-            self.delete_shares_numbered(ur.uri, range(1,10))
+            self.uri_1share = ur.get_uri()
+            self.delete_shares_numbered(ur.get_uri(), range(1,10))
         d.addCallback(_stash_bad)

         # the download is abandoned as soon as it's clear that we won't get
@@ -3234,6 +3340,25 @@
         return d

+    def test_broken_socket(self):
+        # When the http connection breaks (such as when node.url is overwritten
+        # by a confused user), a user friendly error message should be printed.
+        self.basedir = "cli/Errors/test_broken_socket"
+        self.set_up_grid()
+
+        # Simulate a connection error
+        def _socket_error(*args, **kwargs):
+            raise socket_error('test error')
+        self.patch(allmydata.scripts.common_http.httplib.HTTPConnection,
+                   "endheaders", _socket_error)
+
+        d = self.do_cli("mkdir")
+        def _check_invalid((rc,stdout,stderr)):
+            self.failIfEqual(rc, 0)
+            self.failUnlessIn("Error trying to connect to http://127.0.0.1", stderr)
+        d.addCallback(_check_invalid)
+        return d
+

 class Get(GridTestMixin, CLITestMixin, unittest.TestCase):
     def test_get_without_alias(self):
@@ -3583,3 +3708,112 @@
             _cleanup(None)
             raise
         return d
+
+class Options(unittest.TestCase):
+    # this test case only looks at argument-processing and simple stuff.
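(For orientation, a minimal sketch, not part of the patch: the Options tests below exercise the new global-option handling, in which the runner accepts "tahoe [global-opts] COMMAND [options]", so flags like --node-directory come before the verb. Descending from runner.Options to a subcommand's options mirrors the parse() helper defined just below; the "ls" verb and directory are arbitrary example values.)

    o = runner.Options()
    o.parseOptions(["--node-directory", "cli/test_options", "ls"])
    while hasattr(o, "subOptions"):   # walk down to the ls-specific options
        o = o.subOptions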
+
+    def parse(self, args, stdout=None):
+        o = runner.Options()
+        if stdout is not None:
+            o.stdout = stdout
+        o.parseOptions(args)
+        while hasattr(o, "subOptions"):
+            o = o.subOptions
+        return o
+
+    def test_list(self):
+        fileutil.rm_dir("cli/test_options")
+        fileutil.make_dirs("cli/test_options")
+        fileutil.make_dirs("cli/test_options/private")
+        fileutil.write("cli/test_options/node.url", "http://localhost:8080/\n")
+        filenode_uri = uri.WriteableSSKFileURI(writekey="\x00"*16,
+                                               fingerprint="\x00"*32)
+        private_uri = uri.DirectoryURI(filenode_uri).to_string()
+        fileutil.write("cli/test_options/private/root_dir.cap", private_uri + "\n")
+        def parse2(args): return parse_options("cli/test_options", "ls", args)
+        o = parse2([])
+        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
+        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
+        self.failUnlessEqual(o.where, u"")
+
+        o = parse2(["--node-url", "http://example.org:8111/"])
+        self.failUnlessEqual(o['node-url'], "http://example.org:8111/")
+        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
+        self.failUnlessEqual(o.where, u"")
+
+        o = parse2(["--dir-cap", "root"])
+        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
+        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], "root")
+        self.failUnlessEqual(o.where, u"")
+
+        other_filenode_uri = uri.WriteableSSKFileURI(writekey="\x11"*16,
+                                                     fingerprint="\x11"*32)
+        other_uri = uri.DirectoryURI(other_filenode_uri).to_string()
+        o = parse2(["--dir-cap", other_uri])
+        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
+        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
+        self.failUnlessEqual(o.where, u"")
+
+        o = parse2(["--dir-cap", other_uri, "subdir"])
+        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
+        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
+        self.failUnlessEqual(o.where, u"subdir")
+
+        self.failUnlessRaises(usage.UsageError, parse2,
+                              ["--node-url", "NOT-A-URL"])
+
+        o = parse2(["--node-url", "http://localhost:8080"])
+        self.failUnlessEqual(o["node-url"], "http://localhost:8080/")
+
+        o = parse2(["--node-url", "https://localhost/"])
+        self.failUnlessEqual(o["node-url"], "https://localhost/")
+
+    def test_version(self):
+        # "tahoe --version" dumps text to stdout and exits
+        stdout = StringIO()
+        self.failUnlessRaises(SystemExit, self.parse, ["--version"], stdout)
+        self.failUnlessIn("allmydata-tahoe", stdout.getvalue())
+        # but "tahoe SUBCOMMAND --version" should be rejected
+        self.failUnlessRaises(usage.UsageError, self.parse,
+                              ["start", "--version"])
+        self.failUnlessRaises(usage.UsageError, self.parse,
+                              ["start", "--version-and-path"])
+
+    def test_quiet(self):
+        # accepted as an overall option, but not on subcommands
+        o = self.parse(["--quiet", "start"])
+        self.failUnless(o.parent["quiet"])
+        self.failUnlessRaises(usage.UsageError, self.parse,
+                              ["start", "--quiet"])
+
+    def test_basedir(self):
+        # accept a --node-directory option before the verb, or a --basedir
+        # option after, or a basedir argument after, but none in the wrong
+        # place, and not more than one of the three.
+ o = self.parse(["start"]) + self.failUnlessEqual(o["basedir"], os.path.join(os.path.expanduser("~"), + ".tahoe")) + o = self.parse(["start", "here"]) + self.failUnlessEqual(o["basedir"], os.path.abspath("here")) + o = self.parse(["start", "--basedir", "there"]) + self.failUnlessEqual(o["basedir"], os.path.abspath("there")) + o = self.parse(["--node-directory", "there", "start"]) + self.failUnlessEqual(o["basedir"], os.path.abspath("there")) + + self.failUnlessRaises(usage.UsageError, self.parse, + ["--basedir", "there", "start"]) + self.failUnlessRaises(usage.UsageError, self.parse, + ["start", "--node-directory", "there"]) + + self.failUnlessRaises(usage.UsageError, self.parse, + ["--node-directory=there", + "start", "--basedir=here"]) + self.failUnlessRaises(usage.UsageError, self.parse, + ["start", "--basedir=here", "anywhere"]) + self.failUnlessRaises(usage.UsageError, self.parse, + ["--node-directory=there", + "start", "anywhere"]) + self.failUnlessRaises(usage.UsageError, self.parse, + ["--node-directory=there", + "start", "--basedir=here", "anywhere"]) + diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_client.py tahoe-lafs-1.10.0/src/allmydata/test/test_client.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_client.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_client.py 2013-09-03 15:38:27.000000000 +0000 @@ -78,6 +78,22 @@ cancel_secret = c.get_cancel_secret() self.failUnless(base32.b2a(cancel_secret)) + def test_nodekey_yes_storage(self): + basedir = "test_client.Basic.test_nodekey_yes_storage" + os.mkdir(basedir) + fileutil.write(os.path.join(basedir, "tahoe.cfg"), + BASECONFIG) + c = client.Client(basedir) + self.failUnless(c.get_long_nodeid().startswith("v0-")) + + def test_nodekey_no_storage(self): + basedir = "test_client.Basic.test_nodekey_no_storage" + os.mkdir(basedir) + fileutil.write(os.path.join(basedir, "tahoe.cfg"), + BASECONFIG + "[storage]\n" + "enabled = false\n") + c = client.Client(basedir) + self.failUnless(c.get_long_nodeid().startswith("v0-")) + def test_reserved_1(self): basedir = "client.Basic.test_reserved_1" os.mkdir(basedir) @@ -132,16 +148,17 @@ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = bogus\n") - c = client.Client(basedir) - self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 0) + self.failUnlessRaises(ValueError, client.Client, basedir) def _permute(self, sb, key): - return [ s.get_serverid() for s in sb.get_servers_for_psi(key) ] + return [ s.get_longname() for s in sb.get_servers_for_psi(key) ] def test_permute(self): sb = StorageFarmBroker(None, True) for k in ["%d" % i for i in range(5)]: - sb.test_add_rref(k, "rref") + ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake", + "permutation-seed-base32": base32.b2a(k) } + sb.test_add_rref(k, "rref", ann) self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2']) self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3']) @@ -170,6 +187,24 @@ self.failUnless("node.uptime" in stats) self.failUnless(isinstance(stats["node.uptime"], float)) + def test_helper_furl(self): + basedir = "test_client.Basic.test_helper_furl" + os.mkdir(basedir) + + def _check(config, expected_furl): + fileutil.write(os.path.join(basedir, "tahoe.cfg"), + BASECONFIG + config) + c = client.Client(basedir) + uploader = c.getServiceNamed("uploader") + furl, connected = uploader.get_helper_info() + self.failUnlessEqual(furl, expected_furl) + + _check("", None) + _check("helper.furl =\n", None) + _check("helper.furl = \n", 
None)
+        _check("helper.furl = None", None)
+        _check("helper.furl = pb://blah\n", "pb://blah")
+
     @mock.patch('allmydata.util.log.msg')
     @mock.patch('allmydata.frontends.drop_upload.DropUploader')
     def test_create_drop_uploader(self, mock_drop_uploader, mock_log_msg):
@@ -311,6 +346,18 @@
         self.failUnless(n.is_readonly())
         self.failIf(n.is_mutable())

+        # Testing #1679. There was a bug that occurred when the downloader
+        # was downloading the same readcap more than once concurrently (so
+        # the filenode object was cached), and one of the servers failed
+        # during one of the download attempts. No subsequent download would
+        # use that server again, which made the file undownloadable until
+        # the gateway was restarted. The current fix for this (hopefully to
+        # be superseded by a better fix eventually) is to prevent re-use of
+        # filenodes, so the NodeMaker is hereby required *not* to cache and
+        # re-use filenodes for CHKs.
+        other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
+        self.failIf(n is other_n, (n, other_n))
+
         n = c.create_node_from_uri("URI:LIT:n5xgk")
         self.failUnless(IFilesystemNode.providedBy(n))
         self.failUnless(IFileNode.providedBy(n))
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_deepcheck.py tahoe-lafs-1.10.0/src/allmydata/test/test_deepcheck.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_deepcheck.py	2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_deepcheck.py	2013-09-03 15:38:27.000000000 +0000
@@ -280,31 +280,29 @@
         needs_rebalancing = bool( num_servers < 10 )
         if not incomplete:
             self.failUnlessEqual(cr.needs_rebalancing(), needs_rebalancing,
-                                 str((where, cr, cr.get_data())))
-        d = cr.get_data()
-        self.failUnlessEqual(d["count-shares-good"], 10, where)
-        self.failUnlessEqual(d["count-shares-needed"], 3, where)
-        self.failUnlessEqual(d["count-shares-expected"], 10, where)
+                                 str((where, cr, cr.as_dict())))
+        self.failUnlessEqual(cr.get_share_counter_good(), 10, where)
+        self.failUnlessEqual(cr.get_encoding_needed(), 3, where)
+        self.failUnlessEqual(cr.get_encoding_expected(), 10, where)
         if not incomplete:
-            self.failUnlessEqual(d["count-good-share-hosts"], num_servers,
-                                 where)
-        self.failUnlessEqual(d["count-corrupt-shares"], 0, where)
-        self.failUnlessEqual(d["list-corrupt-shares"], [], where)
+            self.failUnlessEqual(cr.get_host_counter_good_shares(),
+                                 num_servers, where)
+        self.failUnlessEqual(cr.get_corrupt_shares(), [], where)
         if not incomplete:
-            self.failUnlessEqual(sorted(d["servers-responding"]),
+            self.failUnlessEqual(sorted([s.get_serverid()
+                                         for s in cr.get_servers_responding()]),
                                  sorted(self.g.get_all_serverids()),
                                  where)
-        self.failUnless("sharemap" in d, str((where, d)))
         all_serverids = set()
-        for (shareid, serverids) in d["sharemap"].items():
-            all_serverids.update(serverids)
+        for (shareid, servers) in cr.get_sharemap().items():
+            all_serverids.update([s.get_serverid() for s in servers])
         self.failUnlessEqual(sorted(all_serverids),
                              sorted(self.g.get_all_serverids()),
                              where)
-        self.failUnlessEqual(d["count-wrong-shares"], 0, where)
-        self.failUnlessEqual(d["count-recoverable-versions"], 1, where)
-        self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where)
+        self.failUnlessEqual(cr.get_share_counter_wrong(), 0, where)
+        self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where)
+        self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where)

     def
check_and_repair_is_healthy(self, cr, n, where, incomplete=False): @@ -761,8 +759,8 @@ def do_cli_manifest_stream1(self): basedir = self.get_clientdir(0) - d = self._run_cli(["manifest", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "manifest", self.root_uri]) def _check((out,err)): self.failUnlessEqual(err, "") @@ -789,8 +787,8 @@ def do_cli_manifest_stream2(self): basedir = self.get_clientdir(0) - d = self._run_cli(["manifest", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "manifest", "--raw", self.root_uri]) def _check((out,err)): @@ -802,8 +800,8 @@ def do_cli_manifest_stream3(self): basedir = self.get_clientdir(0) - d = self._run_cli(["manifest", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "manifest", "--storage-index", self.root_uri]) def _check((out,err)): @@ -814,8 +812,8 @@ def do_cli_manifest_stream4(self): basedir = self.get_clientdir(0) - d = self._run_cli(["manifest", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "manifest", "--verify-cap", self.root_uri]) def _check((out,err)): @@ -830,8 +828,8 @@ def do_cli_manifest_stream5(self): basedir = self.get_clientdir(0) - d = self._run_cli(["manifest", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "manifest", "--repair-cap", self.root_uri]) def _check((out,err)): @@ -846,8 +844,8 @@ def do_cli_stats1(self): basedir = self.get_clientdir(0) - d = self._run_cli(["stats", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "stats", self.root_uri]) def _check3((out,err)): lines = [l.strip() for l in out.split("\n") if l] @@ -866,8 +864,8 @@ def do_cli_stats2(self): basedir = self.get_clientdir(0) - d = self._run_cli(["stats", - "--node-directory", basedir, + d = self._run_cli(["--node-directory", basedir, + "stats", "--raw", self.root_uri]) def _check4((out,err)): @@ -1010,9 +1008,8 @@ self.failUnless(ICheckResults.providedBy(cr), (cr, type(cr), where)) self.failUnless(cr.is_healthy(), (cr.get_report(), cr.is_healthy(), cr.get_summary(), where)) self.failUnless(cr.is_recoverable(), where) - d = cr.get_data() - self.failUnlessEqual(d["count-recoverable-versions"], 1, where) - self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where) + self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where) + self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where) return cr except Exception, le: le.args = tuple(le.args + (where,)) @@ -1022,31 +1019,28 @@ self.failUnless(ICheckResults.providedBy(cr), where) self.failIf(cr.is_healthy(), where) self.failUnless(cr.is_recoverable(), where) - d = cr.get_data() - self.failUnlessEqual(d["count-recoverable-versions"], 1, where) - self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where) + self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where) + self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where) return cr def check_has_corrupt_shares(self, cr, where): # by "corrupt-shares" we mean the file is still recoverable self.failUnless(ICheckResults.providedBy(cr), where) - d = cr.get_data() self.failIf(cr.is_healthy(), (where, cr)) self.failUnless(cr.is_recoverable(), where) - d = cr.get_data() - self.failUnless(d["count-shares-good"] < 10, where) - self.failUnless(d["count-corrupt-shares"], where) - self.failUnless(d["list-corrupt-shares"], where) + self.failUnless(cr.get_share_counter_good() < 10, where) + 
self.failUnless(cr.get_corrupt_shares(), where)
         return cr

     def check_is_unrecoverable(self, cr, where):
         self.failUnless(ICheckResults.providedBy(cr), where)
-        d = cr.get_data()
         self.failIf(cr.is_healthy(), where)
         self.failIf(cr.is_recoverable(), where)
-        self.failUnless(d["count-shares-good"] < d["count-shares-needed"], (d["count-shares-good"], d["count-shares-needed"], where))
-        self.failUnlessEqual(d["count-recoverable-versions"], 0, where)
-        self.failUnlessEqual(d["count-unrecoverable-versions"], 1, where)
+        self.failUnless(cr.get_share_counter_good() < cr.get_encoding_needed(),
+                        (cr.get_share_counter_good(), cr.get_encoding_needed(),
+                         where))
+        self.failUnlessEqual(cr.get_version_counter_recoverable(), 0, where)
+        self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 1, where)
         return cr

     def do_check(self, ignored):
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_dirnode.py tahoe-lafs-1.10.0/src/allmydata/test/test_dirnode.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_dirnode.py	2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_dirnode.py	2013-09-03 15:38:27.000000000 +0000
@@ -1097,6 +1097,23 @@
             d.addCallback(_check_results)
         return d

+    def test_deepcheck_cachemisses(self):
+        self.basedir = "dirnode/Dirnode/test_deepcheck_cachemisses"
+        self.set_up_grid()
+        d = self._test_deepcheck_create()
+        # Clear the counters and set the rootnode
+        d.addCallback(lambda rootnode:
+                      not [ss._clear_counters() for ss
+                           in self.g.wrappers_by_id.values()] or rootnode)
+        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
+        def _check(ign):
+            count = sum([ss.counter_by_methname['slot_readv']
+                         for ss in self.g.wrappers_by_id.values()])
+            self.failIf(count > 60, 'Expected at most 60 cache misses, '
+                                    'but there were %d' % (count,))
+        d.addCallback(_check)
+        return d
+
     def test_deepcheck_mdmf(self):
         self.basedir = "dirnode/Dirnode/test_deepcheck_mdmf"
         self.set_up_grid()
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_download.py tahoe-lafs-1.10.0/src/allmydata/test/test_download.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_download.py	2012-05-14 02:07:27.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_download.py	2013-09-03 15:38:27.000000000 +0000
@@ -87,9 +87,9 @@
     def _created_immutable(ur):
         # write the generated shares and URI to a file, which can then be
         # incorporated into this one next time.
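The test_deepcheck_cachemisses test above counts slot_readv calls through the grid's per-server wrappers. A generic sketch of such a counting proxy; only the counter_by_methname attribute name is taken from the test, the rest is invented:

    class CountingWrapper(object):
        def __init__(self, original):
            self.original = original
            self.counter_by_methname = {}
        def __getattr__(self, name):
            meth = getattr(self.original, name)
            def wrapper(*a, **kw):
                # tally the call, then delegate unchanged
                self.counter_by_methname[name] = \
                    self.counter_by_methname.get(name, 0) + 1
                return meth(*a, **kw)
            return wrapper

    class FakeServer(object):
        def slot_readv(self, *a):
            return {}

    ss = CountingWrapper(FakeServer())
    ss.slot_readv("si", [0], [(0, 100)])
    assert ss.counter_by_methname["slot_readv"] == 1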
- f.write('immutable_uri = "%s"\n' % ur.uri) + f.write('immutable_uri = "%s"\n' % ur.get_uri()) f.write('immutable_shares = {\n') - si = uri.from_string(ur.uri).get_storage_index() + si = uri.from_string(ur.get_uri()).get_storage_index() si_dir = storage_index_to_dir(si) for (i,ss,ssdir) in self.iterate_servers(): sharedir = os.path.join(ssdir, "shares", si_dir) @@ -284,7 +284,7 @@ u.max_segment_size = 70 # 5 segs d = self.c0.upload(u) def _uploaded(ur): - self.uri = ur.uri + self.uri = ur.get_uri() self.n = self.c0.create_node_from_uri(self.uri) return download_to_data(self.n) d.addCallback(_uploaded) @@ -372,7 +372,7 @@ con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) d1 = n.read(con1, 70, 20) d2 = n.read(con2, 140, 20) return defer.gatherResults([d1,d2]) @@ -397,7 +397,7 @@ con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d1 = n.read(con1, 70, 20) @@ -425,7 +425,7 @@ con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d = n.read(con1, 12000, 20) @@ -515,8 +515,8 @@ def _corruptor(s, debug=False): which = 48 # first byte of block0 return s[:which] + chr(ord(s[which])^0x01) + s[which+1:] - self.corrupt_all_shares(ur.uri, _corruptor) - n = self.c0.create_node_from_uri(ur.uri) + self.corrupt_all_shares(ur.get_uri(), _corruptor) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) con1 = MemoryConsumer() @@ -556,8 +556,8 @@ def _corruptor(s, debug=False): which = 48 # first byte of block0 return s[:which] + chr(ord(s[which])^0x01) + s[which+1:] - self.corrupt_all_shares(ur.uri, _corruptor) - n = self.c0.create_node_from_uri(ur.uri) + self.corrupt_all_shares(ur.get_uri(), _corruptor) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) con1 = MemoryConsumer() @@ -771,7 +771,7 @@ u.max_segment_size = 60 # 6 segs d = self.c0.upload(u) def _uploaded(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) @@ -810,7 +810,7 @@ con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): - n = self.c0.create_node_from_uri(ur.uri) + n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d1 = n.read(con1, 70, 20) @@ -1002,7 +1002,7 @@ d = self.c0.upload(u) def _uploaded(ur): - imm_uri = ur.uri + imm_uri = ur.get_uri() self.shares = self.copy_shares(imm_uri) d = defer.succeed(None) # 'victims' is a list of corruption tests to run. 
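The hunks here are the mechanical ur.uri to ur.get_uri() migration. If legacy attribute access had to keep working during such a change, a forwarding property is one way to stage it; this is a sketch of that general pattern, not what Tahoe did:

    import warnings

    class UploadResultsSketch(object):
        def __init__(self, uri):
            self._uri = uri
        def get_uri(self):
            return self._uri
        @property
        def uri(self):
            # legacy spelling, kept only to stage a migration
            warnings.warn("use get_uri() instead of .uri", DeprecationWarning)
            return self._uri

    ur = UploadResultsSketch("URI:CHK:example")
    assert ur.uri == ur.get_uri()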
Each one flips @@ -1099,7 +1099,7 @@ d = self.c0.upload(u) def _uploaded(ur): - imm_uri = ur.uri + imm_uri = ur.get_uri() self.shares = self.copy_shares(imm_uri) corrupt_me = [(48, "block data", "Last failure: None"), @@ -1159,7 +1159,7 @@ u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): - imm_uri = ur.uri + imm_uri = ur.get_uri() n = self.c0.create_node_from_uri(imm_uri) return download_to_data(n) d.addCallback(_uploaded) @@ -1182,7 +1182,7 @@ u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): - imm_uri = ur.uri + imm_uri = ur.get_uri() n = self.c0.create_node_from_uri(imm_uri) return download_to_data(n) d.addCallback(_uploaded) @@ -1202,7 +1202,7 @@ u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): - imm_uri = ur.uri + imm_uri = ur.get_uri() def _do_corrupt(which, newvalue): def _corruptor(s, debug=False): return s[:which] + chr(newvalue) + s[which+1:] diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_encode.py tahoe-lafs-1.10.0/src/allmydata/test/test_encode.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_encode.py 2012-06-21 19:42:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_encode.py 2013-09-03 15:38:27.000000000 +0000 @@ -268,10 +268,8 @@ # force use of multiple segments e = encode.Encoder() u = upload.Data(data, convergence="some convergence string") - u.max_segment_size = max_segment_size - u.encoding_param_k = 25 - u.encoding_param_happy = 75 - u.encoding_param_n = 100 + u.set_default_encoding_parameters({'max_segment_size': max_segment_size, + 'k': 25, 'happy': 75, 'n': 100}) eu = upload.EncryptAnUploadable(u) d = e.set_encrypted_uploadable(eu) @@ -395,7 +393,7 @@ u.encoding_param_happy = 1 u.encoding_param_n = 100 d = self.c0.upload(u) - d.addCallback(lambda ur: self.c0.create_node_from_uri(ur.uri)) + d.addCallback(lambda ur: self.c0.create_node_from_uri(ur.get_uri())) # returns a FileNode return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_encodingutil.py tahoe-lafs-1.10.0/src/allmydata/test/test_encodingutil.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_encodingutil.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_encodingutil.py 2013-09-03 15:38:27.000000000 +0000 @@ -295,36 +295,37 @@ def tearDown(self): _reload() - def _check(self, inp, out, enc, optional_quotes): + def _check(self, inp, out, enc, optional_quotes, quote_newlines): out2 = out if optional_quotes: out2 = out2[1:-1] - self.failUnlessReallyEqual(quote_output(inp, encoding=enc), out) - self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quotemarks=False), out2) + self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quote_newlines=quote_newlines), out) + self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) if out[0:2] == 'b"': pass elif isinstance(inp, str): - self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc), out) - self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc, quotemarks=False), out2) + self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc, quote_newlines=quote_newlines), out) + self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) else: - self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), encoding=enc), out) - self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), encoding=enc, quotemarks=False), out2) + self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), 
encoding=enc, quote_newlines=quote_newlines), out) + self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) def _test_quote_output_all(self, enc): - def check(inp, out, optional_quotes=False): - self._check(inp, out, enc, optional_quotes) + def check(inp, out, optional_quotes=False, quote_newlines=None): + self._check(inp, out, enc, optional_quotes, quote_newlines) # optional single quotes check("foo", "'foo'", True) check("\\", "'\\'", True) check("$\"`", "'$\"`'", True) + check("\n", "'\n'", True, quote_newlines=False) # mandatory single quotes check("\"", "'\"'") # double quotes check("'", "\"'\"") - check("\n", "\"\\x0a\"") + check("\n", "\"\\x0a\"", quote_newlines=True) check("\x00", "\"\\x00\"") # invalid Unicode and astral planes @@ -343,8 +344,8 @@ check("\x00\"$\\`\x80\xFF", "b\"\\x00\\\"\\$\\\\\\`\\x80\\xff\"") def test_quote_output_ascii(self, enc='ascii'): - def check(inp, out, optional_quotes=False): - self._check(inp, out, enc, optional_quotes) + def check(inp, out, optional_quotes=False, quote_newlines=None): + self._check(inp, out, enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u00D7", "\"\\xd7\"") @@ -353,10 +354,12 @@ check(u"\u2621", "\"\\u2621\"") check(u"'\u2621", "\"'\\u2621\"") check(u"\"\u2621", "\"\\\"\\u2621\"") + check(u"\n", "'\n'", True, quote_newlines=False) + check(u"\n", "\"\\x0a\"", quote_newlines=True) def test_quote_output_latin1(self, enc='latin1'): - def check(inp, out, optional_quotes=False): - self._check(inp, out.encode('latin1'), enc, optional_quotes) + def check(inp, out, optional_quotes=False, quote_newlines=None): + self._check(inp, out.encode('latin1'), enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u00D7", u"'\u00D7'", True) @@ -366,16 +369,20 @@ check(u"\u2621", u"\"\\u2621\"") check(u"'\u2621", u"\"'\\u2621\"") check(u"\"\u2621", u"\"\\\"\\u2621\"") + check(u"\n", u"'\n'", True, quote_newlines=False) + check(u"\n", u"\"\\x0a\"", quote_newlines=True) def test_quote_output_utf8(self, enc='utf-8'): - def check(inp, out, optional_quotes=False): - self._check(inp, out.encode('utf-8'), enc, optional_quotes) + def check(inp, out, optional_quotes=False, quote_newlines=None): + self._check(inp, out.encode('utf-8'), enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u2621", u"'\u2621'", True) check(u"'\u2621", u"\"'\u2621\"") check(u"\"\u2621", u"'\"\u2621'") check(u"\u2621\"", u"'\u2621\"'", True) + check(u"\n", u"'\n'", True, quote_newlines=False) + check(u"\n", u"\"\\x0a\"", quote_newlines=True) def test_quote_output_default(self): encodingutil.io_encoding = 'ascii' diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_helper.py tahoe-lafs-1.10.0/src/allmydata/test/test_helper.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_helper.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_helper.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,4 +1,5 @@ import os +from twisted.internet import defer from twisted.trial import unittest from twisted.application import service @@ -21,25 +22,49 @@ def _got_size(size): d2 = eu.get_all_encoding_parameters() def _got_parms(parms): + # just pretend we did the upload needed_shares, happy, total_shares, segsize = parms ueb_data = {"needed_shares": needed_shares, "total_shares": total_shares, "segment_size": segsize, "size": size, } - self._results.uri_extension_data = ueb_data - self._results.verifycapstr = 
uri.CHKFileVerifierURI(self._storage_index, "x"*32, - needed_shares, total_shares, - size).to_string() - return self._results + ueb_hash = "fake" + v = uri.CHKFileVerifierURI(self._storage_index, "x"*32, + needed_shares, total_shares, size) + _UR = upload.UploadResults + ur = _UR(file_size=size, + ciphertext_fetched=0, + preexisting_shares=0, + pushed_shares=total_shares, + sharemap={}, + servermap={}, + timings={}, + uri_extension_data=ueb_data, + uri_extension_hash=ueb_hash, + verifycapstr=v.to_string()) + self._upload_status.set_results(ur) + return ur d2.addCallback(_got_parms) return d2 d.addCallback(_got_size) return d -class CHKUploadHelper_already_uploaded(offloaded.CHKUploadHelper): - def start(self): - res = upload.UploadResults() +class Helper_fake_upload(offloaded.Helper): + def _make_chk_upload_helper(self, storage_index, lp): + si_s = si_b2a(storage_index) + incoming_file = os.path.join(self._chk_incoming, si_s) + encoding_file = os.path.join(self._chk_encoding, si_s) + uh = CHKUploadHelper_fake(storage_index, self, + self._storage_broker, + self._secret_holder, + incoming_file, encoding_file, + lp) + return uh + +class Helper_already_uploaded(Helper_fake_upload): + def _check_chk(self, storage_index, lp): + res = upload.HelperUploadResults() res.uri_extension_hash = hashutil.uri_extension_hash("") # we're pretending that the file they're trying to upload was already @@ -53,7 +78,7 @@ "size": len(DATA), } res.uri_extension_data = ueb_data - return (res, None) + return defer.succeed(res) class FakeClient(service.MultiService): DEFAULT_ENCODING_PARAMETERS = {"k":25, @@ -64,6 +89,8 @@ def get_encoding_parameters(self): return self.DEFAULT_ENCODING_PARAMETERS + def get_storage_broker(self): + return self.storage_broker def flush_but_dont_ignore(res): d = flushEventualQueue() @@ -89,8 +116,8 @@ timeout = 240 # It takes longer than 120 seconds on Francois's arm box. 
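Note how the fakes are now injected: rather than assigning a chk_upload_helper_class attribute after construction, the tests subclass the helper and override a single factory method. A reduced, generic sketch of that seam (all names illustrative):

    class Helper(object):
        def _make_upload_helper(self, storage_index):
            raise NotImplementedError  # subclasses supply the worker
        def upload(self, storage_index):
            return self._make_upload_helper(storage_index).start()

    class FakeUploadHelper(object):
        def __init__(self, storage_index):
            self.storage_index = storage_index
        def start(self):
            return "fake results for %r" % (self.storage_index,)

    class HelperWithFakeUpload(Helper):
        def _make_upload_helper(self, storage_index):
            return FakeUploadHelper(storage_index)

    assert "fake results" in HelperWithFakeUpload().upload("si")

Overriding the factory keeps the fake's constructor signature in one place, which is why Helper_already_uploaded can layer on top of Helper_fake_upload above.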
def setUp(self): self.s = FakeClient() - self.storage_broker = StorageFarmBroker(None, True) - self.secret_holder = client.SecretHolder("lease secret", "convergence") + self.s.storage_broker = StorageFarmBroker(None, True) + self.s.secret_holder = client.SecretHolder("lease secret", "converge") self.s.startService() self.tub = t = Tub() @@ -101,13 +128,12 @@ # bogus host/port t.setLocation("bogus:1234") - def setUpHelper(self, basedir): + def setUpHelper(self, basedir, helper_class=Helper_fake_upload): fileutil.make_dirs(basedir) - self.helper = h = offloaded.Helper(basedir, - self.storage_broker, - self.secret_holder, - None, None) - h.chk_upload_helper_class = CHKUploadHelper_fake + self.helper = h = helper_class(basedir, + self.s.storage_broker, + self.s.secret_holder, + None, None) self.helper_furl = self.tub.registerReference(h) def tearDown(self): @@ -131,7 +157,7 @@ return upload_data(u, DATA, convergence="some convergence string") d.addCallback(_ready) def _uploaded(results): - the_uri = results.uri + the_uri = results.get_uri() assert "CHK" in the_uri d.addCallback(_uploaded) @@ -181,7 +207,7 @@ return upload_data(u, DATA, convergence="test convergence string") d.addCallback(_ready) def _uploaded(results): - the_uri = results.uri + the_uri = results.get_uri() assert "CHK" in the_uri d.addCallback(_uploaded) @@ -196,8 +222,7 @@ def test_already_uploaded(self): self.basedir = "helper/AssistedUpload/test_already_uploaded" - self.setUpHelper(self.basedir) - self.helper.chk_upload_helper_class = CHKUploadHelper_already_uploaded + self.setUpHelper(self.basedir, helper_class=Helper_already_uploaded) u = upload.Uploader(self.helper_furl) u.setServiceParent(self.s) @@ -209,7 +234,7 @@ return upload_data(u, DATA, convergence="some convergence string") d.addCallback(_ready) def _uploaded(results): - the_uri = results.uri + the_uri = results.get_uri() assert "CHK" in the_uri d.addCallback(_uploaded) diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_hung_server.py tahoe-lafs-1.10.0/src/allmydata/test/test_hung_server.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_hung_server.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_hung_server.py 2013-09-03 15:38:27.000000000 +0000 @@ -117,7 +117,7 @@ data = upload.Data(immutable_plaintext, convergence="") d = self.c0.upload(data) def _uploaded_immutable(upload_res): - self.uri = upload_res.uri + self.uri = upload_res.get_uri() self.shares = self.find_uri_shares(self.uri) d.addCallback(_uploaded_immutable) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_immutable.py tahoe-lafs-1.10.0/src/allmydata/test/test_immutable.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_immutable.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_immutable.py 2013-09-03 15:38:27.000000000 +0000 @@ -146,8 +146,8 @@ c1.DEFAULT_ENCODING_PARAMETERS['happy'] = 1 d = c1.upload(Data(TEST_DATA, convergence="")) def _after_upload(ur): - self.uri = ur.uri - self.filenode = self.g.clients[0].create_node_from_uri(ur.uri) + self.uri = ur.get_uri() + self.filenode = self.g.clients[0].create_node_from_uri(ur.get_uri()) return self.uri d.addCallback(_after_upload) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_introducer.py tahoe-lafs-1.10.0/src/allmydata/test/test_introducer.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_introducer.py 2012-05-14 02:07:27.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_introducer.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,19 +1,27 @@ -import os, 
re +import os, re, itertools from base64 import b32decode +import simplejson from twisted.trial import unittest -from twisted.internet import defer +from twisted.internet import defer, address from twisted.python import log from foolscap.api import Tub, Referenceable, fireEventually, flushEventualQueue from twisted.application import service from allmydata.interfaces import InsufficientVersionError -from allmydata.introducer.client import IntroducerClient -from allmydata.introducer.server import IntroducerService +from allmydata.introducer.client import IntroducerClient, \ + WrapV2ClientInV1Interface +from allmydata.introducer.server import IntroducerService, FurlFileConflictError +from allmydata.introducer.common import get_tubid_string_from_ann, \ + get_tubid_string, sign_to_foolscap, unsign_from_foolscap, \ + UnknownKeyError +from allmydata.introducer import old # test compatibility with old introducer .tac files from allmydata.introducer import IntroducerNode -from allmydata.util import pollmixin +from allmydata.web import introweb +from allmydata.client import Client as TahoeClient +from allmydata.util import pollmixin, keyutil, idlib, fileutil import allmydata.test.common_util as testutil class LoggingMultiService(service.MultiService): @@ -21,15 +29,49 @@ log.msg(msg, **kw) class Node(testutil.SignalMixin, unittest.TestCase): - def test_loadable(self): - basedir = "introducer.IntroducerNode.test_loadable" + def test_furl(self): + basedir = "introducer.IntroducerNode.test_furl" os.mkdir(basedir) - q = IntroducerNode(basedir) + public_fn = os.path.join(basedir, "introducer.furl") + private_fn = os.path.join(basedir, "private", "introducer.furl") + q1 = IntroducerNode(basedir) d = fireEventually(None) - d.addCallback(lambda res: q.startService()) - d.addCallback(lambda res: q.when_tub_ready()) - d.addCallback(lambda res: q.stopService()) + d.addCallback(lambda res: q1.startService()) + d.addCallback(lambda res: q1.when_tub_ready()) + d.addCallback(lambda res: q1.stopService()) d.addCallback(flushEventualQueue) + def _check_furl(res): + # new nodes create unguessable furls in private/introducer.furl + ifurl = fileutil.read(private_fn) + self.failUnless(ifurl) + ifurl = ifurl.strip() + self.failIf(ifurl.endswith("/introducer"), ifurl) + + # old nodes created guessable furls in BASEDIR/introducer.furl + guessable = ifurl[:ifurl.rfind("/")] + "/introducer" + fileutil.write(public_fn, guessable+"\n", mode="w") # text + + # if we see both files, throw an error + self.failUnlessRaises(FurlFileConflictError, + IntroducerNode, basedir) + + # when we see only the public one, move it to private/ and use + # the existing furl instead of creating a new one + os.unlink(private_fn) + q2 = IntroducerNode(basedir) + d2 = fireEventually(None) + d2.addCallback(lambda res: q2.startService()) + d2.addCallback(lambda res: q2.when_tub_ready()) + d2.addCallback(lambda res: q2.stopService()) + d2.addCallback(flushEventualQueue) + def _check_furl2(res): + self.failIf(os.path.exists(public_fn)) + ifurl2 = fileutil.read(private_fn) + self.failUnless(ifurl2) + self.failUnlessEqual(ifurl2.strip(), guessable) + d2.addCallback(_check_furl2) + return d2 + d.addCallback(_check_furl) return d class ServiceMixin: @@ -47,14 +89,14 @@ def test_create(self): ic = IntroducerClient(None, "introducer.furl", u"my_nickname", - "my_version", "oldest_version") + "my_version", "oldest_version", {}, fakeseq) self.failUnless(isinstance(ic, IntroducerClient)) def test_listen(self): i = IntroducerService() 
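The test_furl test above covers the 1.10 move of the introducer FURL from BASEDIR/introducer.furl to private/introducer.furl. The decision logic reduces to roughly the following sketch; only FurlFileConflictError is taken from the code under test, the helper itself is invented:

    import os

    class FurlFileConflictError(Exception):
        pass

    def choose_furl_file(basedir):
        public_fn = os.path.join(basedir, "introducer.furl")
        private_fn = os.path.join(basedir, "private", "introducer.furl")
        if os.path.exists(public_fn) and os.path.exists(private_fn):
            # refuse to guess which copy is authoritative
            raise FurlFileConflictError("both %s and %s exist"
                                        % (public_fn, private_fn))
        if os.path.exists(public_fn):
            # legacy layout: adopt the old (guessable) furl, but keep it private
            if not os.path.isdir(os.path.dirname(private_fn)):
                os.makedirs(os.path.dirname(private_fn))
            os.rename(public_fn, private_fn)
        return private_fn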
i.setServiceParent(self.parent)

-    def test_duplicate(self):
+    def test_duplicate_publish(self):
         i = IntroducerService()
         self.failUnlessEqual(len(i.get_announcements()), 0)
         self.failUnlessEqual(len(i.get_subscribers()), 0)
@@ -73,6 +115,331 @@
         self.failUnlessEqual(len(i.get_announcements()), 2)
         self.failUnlessEqual(len(i.get_subscribers()), 0)

+    def test_id_collision(self):
+        # test replacement case where tubid equals a keyid (one should
+        # not replace the other)
+        i = IntroducerService()
+        ic = IntroducerClient(None,
+                              "introducer.furl", u"my_nickname",
+                              "my_version", "oldest_version", {}, fakeseq)
+        sk_s, vk_s = keyutil.make_keypair()
+        sk, _ignored = keyutil.parse_privkey(sk_s)
+        keyid = keyutil.remove_prefix(vk_s, "pub-v0-")
+        furl1 = "pb://onug64tu@127.0.0.1:123/short" # base32("short")
+        ann_t = make_ann_t(ic, furl1, sk, 1)
+        i.remote_publish_v2(ann_t, Referenceable())
+        announcements = i.get_announcements()
+        self.failUnlessEqual(len(announcements), 1)
+        key1 = ("storage", "v0-"+keyid, None)
+        self.failUnlessEqual(announcements[0].index, key1)
+        ann1_out = announcements[0].announcement
+        self.failUnlessEqual(ann1_out["anonymous-storage-FURL"], furl1)
+
+        furl2 = "pb://%s@127.0.0.1:36106/swissnum" % keyid
+        ann2 = (furl2, "storage", "RIStorage", "nick1", "ver23", "ver0")
+        i.remote_publish(ann2)
+        announcements = i.get_announcements()
+        self.failUnlessEqual(len(announcements), 2)
+        key2 = ("storage", None, keyid)
+        wanted = [ad for ad in announcements if ad.index == key2]
+        self.failUnlessEqual(len(wanted), 1)
+        ann2_out = wanted[0].announcement
+        self.failUnlessEqual(ann2_out["anonymous-storage-FURL"], furl2)
+
+
+def fakeseq():
+    return 1, "nonce"
+
+seqnum_counter = itertools.count(1)
+def realseq():
+    # note: the original used os.randint(), which does not exist; any
+    # random string will do for the nonce
+    return seqnum_counter.next(), os.urandom(8).encode("hex")
+
+def make_ann(furl):
+    ann = { "anonymous-storage-FURL": furl,
+            "permutation-seed-base32": get_tubid_string(furl) }
+    return ann
+
+def make_ann_t(ic, furl, privkey, seqnum):
+    ann_d = ic.create_announcement_dict("storage", make_ann(furl))
+    ann_d["seqnum"] = seqnum
+    ann_d["nonce"] = "nonce"
+    ann_t = sign_to_foolscap(ann_d, privkey)
+    return ann_t
+
+class Client(unittest.TestCase):
+    def test_duplicate_receive_v1(self):
+        ic = IntroducerClient(None,
+                              "introducer.furl", u"my_nickname",
+                              "my_version", "oldest_version", {}, fakeseq)
+        announcements = []
+        ic.subscribe_to("storage",
+                        lambda key_s,ann: announcements.append(ann))
+        furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/gydnpigj2ja2qr2srq4ikjwnl7xfgbra"
+        ann1 = (furl1, "storage", "RIStorage", "nick1", "ver23", "ver0")
+        ann1b = (furl1, "storage", "RIStorage", "nick1", "ver24", "ver0")
+        ca = WrapV2ClientInV1Interface(ic)
+
+        ca.remote_announce([ann1])
+        d = fireEventually()
+        def _then(ign):
+            self.failUnlessEqual(len(announcements), 1)
+            self.failUnlessEqual(announcements[0]["nickname"], u"nick1")
+            self.failUnlessEqual(announcements[0]["my-version"], "ver23")
+            self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 1)
+            self.failUnlessEqual(ic._debug_counts["new_announcement"], 1)
+            self.failUnlessEqual(ic._debug_counts["update"], 0)
+            self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 0)
+            # now send a duplicate announcement: this should not notify clients
+            ca.remote_announce([ann1])
+            return fireEventually()
+        d.addCallback(_then)
+        def _then2(ign):
+            self.failUnlessEqual(len(announcements), 1)
+            self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 2)
+            self.failUnlessEqual(ic._debug_counts["new_announcement"],
1) + self.failUnlessEqual(ic._debug_counts["update"], 0) + self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 1) + # and a replacement announcement: same FURL, new other stuff. + # Clients should be notified. + ca.remote_announce([ann1b]) + return fireEventually() + d.addCallback(_then2) + def _then3(ign): + self.failUnlessEqual(len(announcements), 2) + self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 3) + self.failUnlessEqual(ic._debug_counts["new_announcement"], 1) + self.failUnlessEqual(ic._debug_counts["update"], 1) + self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 1) + # test that the other stuff changed + self.failUnlessEqual(announcements[-1]["nickname"], u"nick1") + self.failUnlessEqual(announcements[-1]["my-version"], "ver24") + d.addCallback(_then3) + return d + + def test_duplicate_receive_v2(self): + ic1 = IntroducerClient(None, + "introducer.furl", u"my_nickname", + "ver23", "oldest_version", {}, fakeseq) + # we use a second client just to create a different-looking + # announcement + ic2 = IntroducerClient(None, + "introducer.furl", u"my_nickname", + "ver24","oldest_version",{}, fakeseq) + announcements = [] + def _received(key_s, ann): + announcements.append( (key_s, ann) ) + ic1.subscribe_to("storage", _received) + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/gydnp" + furl1a = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:7777/gydnp" + furl2 = "pb://ttwwooyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/ttwwoo" + + privkey_s, pubkey_vs = keyutil.make_keypair() + privkey, _ignored = keyutil.parse_privkey(privkey_s) + pubkey_s = keyutil.remove_prefix(pubkey_vs, "pub-") + + # ann1: ic1, furl1 + # ann1a: ic1, furl1a (same SturdyRef, different connection hints) + # ann1b: ic2, furl1 + # ann2: ic2, furl2 + + self.ann1 = make_ann_t(ic1, furl1, privkey, seqnum=10) + self.ann1old = make_ann_t(ic1, furl1, privkey, seqnum=9) + self.ann1noseqnum = make_ann_t(ic1, furl1, privkey, seqnum=None) + self.ann1b = make_ann_t(ic2, furl1, privkey, seqnum=11) + self.ann1a = make_ann_t(ic1, furl1a, privkey, seqnum=12) + self.ann2 = make_ann_t(ic2, furl2, privkey, seqnum=13) + + ic1.remote_announce_v2([self.ann1]) # queues eventual-send + d = fireEventually() + def _then1(ign): + self.failUnlessEqual(len(announcements), 1) + key_s,ann = announcements[0] + self.failUnlessEqual(key_s, pubkey_s) + self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1) + self.failUnlessEqual(ann["my-version"], "ver23") + d.addCallback(_then1) + + # now send a duplicate announcement. This should not fire the + # subscriber + d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1])) + d.addCallback(fireEventually) + def _then2(ign): + self.failUnlessEqual(len(announcements), 1) + d.addCallback(_then2) + + # an older announcement shouldn't fire the subscriber either + d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1old])) + d.addCallback(fireEventually) + def _then2a(ign): + self.failUnlessEqual(len(announcements), 1) + d.addCallback(_then2a) + + # announcement with no seqnum cannot replace one with-seqnum + d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1noseqnum])) + d.addCallback(fireEventually) + def _then2b(ign): + self.failUnlessEqual(len(announcements), 1) + d.addCallback(_then2b) + + # and a replacement announcement: same FURL, new other stuff. The + # subscriber *should* be fired. 
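The duplicate/replay behavior asserted in these callbacks reduces to one replacement rule per announcement index. A self-contained restatement of that rule, matching the outcomes the tests expect:

    def maybe_accept(current, key, ann):
        # keep one announcement per server key; accept only strictly newer ones
        old = current.get(key)
        if old is not None:
            new_seq, old_seq = ann.get("seqnum"), old.get("seqnum")
            if new_seq is None:
                return False              # no-seqnum cannot replace
            if old_seq is not None and new_seq <= old_seq:
                return False              # duplicate or stale replay
        current[key] = ann
        return True

    anns = {}
    assert maybe_accept(anns, "pub-v0-key", {"seqnum": 10})
    assert not maybe_accept(anns, "pub-v0-key", {"seqnum": 10})  # duplicate
    assert not maybe_accept(anns, "pub-v0-key", {"seqnum": 9})   # old replay
    assert not maybe_accept(anns, "pub-v0-key", {"seqnum": None})
    assert maybe_accept(anns, "pub-v0-key", {"seqnum": 11})      # update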
+ d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1b])) + d.addCallback(fireEventually) + def _then3(ign): + self.failUnlessEqual(len(announcements), 2) + key_s,ann = announcements[-1] + self.failUnlessEqual(key_s, pubkey_s) + self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1) + self.failUnlessEqual(ann["my-version"], "ver24") + d.addCallback(_then3) + + # and a replacement announcement with a different FURL (it uses + # different connection hints) + d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1a])) + d.addCallback(fireEventually) + def _then4(ign): + self.failUnlessEqual(len(announcements), 3) + key_s,ann = announcements[-1] + self.failUnlessEqual(key_s, pubkey_s) + self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a) + self.failUnlessEqual(ann["my-version"], "ver23") + d.addCallback(_then4) + + # now add a new subscription, which should be called with the + # backlog. The introducer only records one announcement per index, so + # the backlog will only have the latest message. + announcements2 = [] + def _received2(key_s, ann): + announcements2.append( (key_s, ann) ) + d.addCallback(lambda ign: ic1.subscribe_to("storage", _received2)) + d.addCallback(fireEventually) + def _then5(ign): + self.failUnlessEqual(len(announcements2), 1) + key_s,ann = announcements2[-1] + self.failUnlessEqual(key_s, pubkey_s) + self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a) + self.failUnlessEqual(ann["my-version"], "ver23") + d.addCallback(_then5) + return d + + def test_id_collision(self): + # test replacement case where tubid equals a keyid (one should + # not replace the other) + ic = IntroducerClient(None, + "introducer.furl", u"my_nickname", + "my_version", "oldest_version", {}, fakeseq) + announcements = [] + ic.subscribe_to("storage", + lambda key_s,ann: announcements.append(ann)) + sk_s, vk_s = keyutil.make_keypair() + sk, _ignored = keyutil.parse_privkey(sk_s) + keyid = keyutil.remove_prefix(vk_s, "pub-v0-") + furl1 = "pb://onug64tu@127.0.0.1:123/short" # base32("short") + furl2 = "pb://%s@127.0.0.1:36106/swissnum" % keyid + ann_t = make_ann_t(ic, furl1, sk, 1) + ic.remote_announce_v2([ann_t]) + d = fireEventually() + def _then(ign): + # first announcement has been processed + self.failUnlessEqual(len(announcements), 1) + self.failUnlessEqual(announcements[0]["anonymous-storage-FURL"], + furl1) + # now submit a second one, with a tubid that happens to look just + # like the pubkey-based serverid we just processed. They should + # not overlap. 
+ ann2 = (furl2, "storage", "RIStorage", "nick1", "ver23", "ver0") + ca = WrapV2ClientInV1Interface(ic) + ca.remote_announce([ann2]) + return fireEventually() + d.addCallback(_then) + def _then2(ign): + # if they overlapped, the second announcement would be ignored + self.failUnlessEqual(len(announcements), 2) + self.failUnlessEqual(announcements[1]["anonymous-storage-FURL"], + furl2) + d.addCallback(_then2) + return d + +class Server(unittest.TestCase): + def test_duplicate(self): + i = IntroducerService() + ic1 = IntroducerClient(None, + "introducer.furl", u"my_nickname", + "ver23", "oldest_version", {}, realseq) + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/gydnp" + + privkey_s, _ = keyutil.make_keypair() + privkey, _ = keyutil.parse_privkey(privkey_s) + + ann1 = make_ann_t(ic1, furl1, privkey, seqnum=10) + ann1_old = make_ann_t(ic1, furl1, privkey, seqnum=9) + ann1_new = make_ann_t(ic1, furl1, privkey, seqnum=11) + ann1_noseqnum = make_ann_t(ic1, furl1, privkey, seqnum=None) + ann1_badseqnum = make_ann_t(ic1, furl1, privkey, seqnum="not an int") + + i.remote_publish_v2(ann1, None) + all = i.get_announcements() + self.failUnlessEqual(len(all), 1) + self.failUnlessEqual(all[0].announcement["seqnum"], 10) + self.failUnlessEqual(i._debug_counts["inbound_message"], 1) + self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 0) + self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) + self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 0) + self.failUnlessEqual(i._debug_counts["inbound_update"], 0) + + i.remote_publish_v2(ann1, None) + all = i.get_announcements() + self.failUnlessEqual(len(all), 1) + self.failUnlessEqual(all[0].announcement["seqnum"], 10) + self.failUnlessEqual(i._debug_counts["inbound_message"], 2) + self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) + self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) + self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 0) + self.failUnlessEqual(i._debug_counts["inbound_update"], 0) + + i.remote_publish_v2(ann1_old, None) + all = i.get_announcements() + self.failUnlessEqual(len(all), 1) + self.failUnlessEqual(all[0].announcement["seqnum"], 10) + self.failUnlessEqual(i._debug_counts["inbound_message"], 3) + self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) + self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) + self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) + self.failUnlessEqual(i._debug_counts["inbound_update"], 0) + + i.remote_publish_v2(ann1_new, None) + all = i.get_announcements() + self.failUnlessEqual(len(all), 1) + self.failUnlessEqual(all[0].announcement["seqnum"], 11) + self.failUnlessEqual(i._debug_counts["inbound_message"], 4) + self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) + self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) + self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) + self.failUnlessEqual(i._debug_counts["inbound_update"], 1) + + i.remote_publish_v2(ann1_noseqnum, None) + all = i.get_announcements() + self.failUnlessEqual(len(all), 1) + self.failUnlessEqual(all[0].announcement["seqnum"], 11) + self.failUnlessEqual(i._debug_counts["inbound_message"], 5) + self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) + self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 1) + self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) + self.failUnlessEqual(i._debug_counts["inbound_update"], 1) + + i.remote_publish_v2(ann1_badseqnum, None) + all = 
i.get_announcements()
+        self.failUnlessEqual(len(all), 1)
+        self.failUnlessEqual(all[0].announcement["seqnum"], 11)
+        self.failUnlessEqual(i._debug_counts["inbound_message"], 6)
+        self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1)
+        self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 2)
+        self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1)
+        self.failUnlessEqual(i._debug_counts["inbound_update"], 1)
+
+
+NICKNAME = u"n\u00EDickname-%s" # LATIN SMALL LETTER I WITH ACUTE
+
 class SystemTestMixin(ServiceMixin, pollmixin.PollMixin):

     def create_tub(self, portnum=0):
@@ -88,36 +455,88 @@
         assert self.central_portnum == portnum
         tub.setLocation("localhost:%d" % self.central_portnum)

-class SystemTest(SystemTestMixin, unittest.TestCase):
-
-    def test_system(self):
-        self.basedir = "introducer/SystemTest/system"
+class Queue(SystemTestMixin, unittest.TestCase):
+    def test_queue_until_connected(self):
+        self.basedir = "introducer/QueueUntilConnected/queued"
         os.makedirs(self.basedir)
-        return self.do_system_test(IntroducerService)
-    test_system.timeout = 480 # occasionally takes longer than 350s on "draco"
+        self.create_tub()
+        introducer = IntroducerService()
+        introducer.setServiceParent(self.parent)
+        iff = os.path.join(self.basedir, "introducer.furl")
+        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
+        tub2 = Tub()
+        tub2.setServiceParent(self.parent)
+        c = IntroducerClient(tub2, ifurl,
+                             u"nickname", "version", "oldest", {}, fakeseq)
+        furl1 = "pb://onug64tu@127.0.0.1:123/short" # base32("short")
+        sk_s, vk_s = keyutil.make_keypair()
+        sk, _ignored = keyutil.parse_privkey(sk_s)
+
+        d = introducer.disownServiceParent()
+        def _offline(ign):
+            # now that the introducer server is offline, create a client and
+            # publish some messages
+            c.setServiceParent(self.parent) # this starts the reconnector
+            c.publish("storage", make_ann(furl1), sk)
+
+            introducer.setServiceParent(self.parent) # restart the server
+            # now wait for the messages to be delivered
+            def _got_announcement():
+                return bool(introducer.get_announcements())
+            return self.poll(_got_announcement)
+        d.addCallback(_offline)
+        def _done(ign):
+            v = introducer.get_announcements()[0]
+            furl = v.announcement["anonymous-storage-FURL"]
+            self.failUnlessEqual(furl, furl1)
+        d.addCallback(_done)

-    def do_system_test(self, create_introducer):
+        # now let the ack get back
+        def _wait_until_idle(ign):
+            def _idle():
+                if c._debug_outstanding:
+                    return False
+                if introducer._debug_outstanding:
+                    return False
+                return True
+            return self.poll(_idle)
+        d.addCallback(_wait_until_idle)
+        return d
+
+
+V1 = "v1"; V2 = "v2"
+class SystemTest(SystemTestMixin, unittest.TestCase):
+
+    def do_system_test(self, server_version):
         self.create_tub()
-        introducer = create_introducer()
+        if server_version == V1:
+            introducer = old.IntroducerService_v1()
+        else:
+            introducer = IntroducerService()
         introducer.setServiceParent(self.parent)
         iff = os.path.join(self.basedir, "introducer.furl")
         tub = self.central_tub
         ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
         self.introducer_furl = ifurl

-        NUMCLIENTS = 5
-        # we have 5 clients who publish themselves, and an extra one does
-        # which not. When the connections are fully established, all six nodes
+        # we have 5 clients who publish themselves as storage servers, and a
+        # sixth which does not. All 6 clients subscribe to hear about
+        # storage. When the connections are fully established, all six nodes
+        # should have 5 connections each.
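Queue.test_queue_until_connected above depends on the client buffering publishes while the introducer is unreachable and flushing them once reconnected. Stripped of Foolscap, the buffering behavior is just this (a sketch with invented names):

    class PublishQueue(object):
        def __init__(self):
            self.connected = False
            self._pending = []
            self.sent = []
        def publish(self, ann):
            if self.connected:
                self.sent.append(ann)
            else:
                self._pending.append(ann)   # hold until the server is back
        def on_connect(self):
            self.connected = True
            while self._pending:
                self.sent.append(self._pending.pop(0))

    q = PublishQueue()
    q.publish({"service-name": "storage"})
    assert not q.sent                       # still offline
    q.on_connect()
    assert len(q.sent) == 1                 # delivered on reconnect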
+ NUM_STORAGE = 5 + NUM_CLIENTS = 6 clients = [] tubs = {} received_announcements = {} - NUM_SERVERS = NUMCLIENTS subscribing_clients = [] publishing_clients = [] + printable_serverids = {} + self.the_introducer = introducer + privkeys = {} + expected_announcements = [0 for c in range(NUM_CLIENTS)] - for i in range(NUMCLIENTS+1): + for i in range(NUM_CLIENTS): tub = Tub() #tub.setOption("logLocalFailures", True) #tub.setOption("logRemoteFailures", True) @@ -128,62 +547,190 @@ tub.setLocation("localhost:%d" % portnum) log.msg("creating client %d: %s" % (i, tub.getShortTubID())) - c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i, - "version", "oldest") + if i == 0: + c = old.IntroducerClient_v1(tub, self.introducer_furl, + NICKNAME % str(i), + "version", "oldest") + else: + c = IntroducerClient(tub, self.introducer_furl, + NICKNAME % str(i), + "version", "oldest", + {"component": "component-v1"}, fakeseq) received_announcements[c] = {} - def got(serverid, ann_d, announcements): - announcements[serverid] = ann_d - c.subscribe_to("storage", got, received_announcements[c]) + def got(key_s_or_tubid, ann, announcements, i): + if i == 0: + index = get_tubid_string_from_ann(ann) + else: + index = key_s_or_tubid or get_tubid_string_from_ann(ann) + announcements[index] = ann + c.subscribe_to("storage", got, received_announcements[c], i) subscribing_clients.append(c) + expected_announcements[i] += 1 # all expect a 'storage' announcement - if i < NUMCLIENTS: - node_furl = tub.registerReference(Referenceable()) - c.publish(node_furl, "storage", "ri_name") + node_furl = tub.registerReference(Referenceable()) + if i < NUM_STORAGE: + if i == 0: + c.publish(node_furl, "storage", "ri_name") + printable_serverids[i] = get_tubid_string(node_furl) + elif i == 1: + # sign the announcement + privkey_s, pubkey_s = keyutil.make_keypair() + privkey, _ignored = keyutil.parse_privkey(privkey_s) + privkeys[c] = privkey + c.publish("storage", make_ann(node_furl), privkey) + if server_version == V1: + printable_serverids[i] = get_tubid_string(node_furl) + else: + assert pubkey_s.startswith("pub-") + printable_serverids[i] = pubkey_s[len("pub-"):] + else: + c.publish("storage", make_ann(node_furl)) + printable_serverids[i] = get_tubid_string(node_furl) publishing_clients.append(c) - # the last one does not publish anything + else: + # the last one does not publish anything + pass + + if i == 0: + # users of the V1 client were required to publish a + # 'stub_client' record (somewhat after they published the + # 'storage' record), so the introducer could see their + # version. Match that behavior. + c.publish(node_furl, "stub_client", "stub_ri_name") + + if i == 2: + # also publish something that nobody cares about + boring_furl = tub.registerReference(Referenceable()) + c.publish("boring", make_ann(boring_furl)) c.setServiceParent(self.parent) clients.append(c) tubs[c] = tub - def _wait_for_all_connections(): - for c in subscribing_clients: - if len(received_announcements[c]) < NUM_SERVERS: + + def _wait_for_connected(ign): + def _connected(): + for c in clients: + if not c.connected_to_introducer(): + return False + return True + return self.poll(_connected) + + # we watch the clients to determine when the system has settled down. + # Then we can look inside the server to assert things about its + # state. 
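The settle-down checks below are all predicates handed to poll(), which re-evaluates them on the reactor instead of sleeping for a fixed time. A rough sketch of such a poller using Twisted's task.deferLater (pollmixin's real version also handles timeouts):

    from twisted.internet import reactor, task

    def poll(check, interval=0.01):
        # fires True once check() becomes true, re-testing every `interval` seconds
        if check():
            return task.deferLater(reactor, 0, lambda: True)
        d = task.deferLater(reactor, interval, lambda: None)
        d.addCallback(lambda _ign: poll(check, interval))
        return d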
+ + def _wait_for_expected_announcements(ign): + def _got_expected_announcements(): + for i,c in enumerate(subscribing_clients): + if len(received_announcements[c]) < expected_announcements[i]: + return False + return True + return self.poll(_got_expected_announcements) + + # before shutting down any Tub, we'd like to know that there are no + # messages outstanding + + def _wait_until_idle(ign): + def _idle(): + for c in subscribing_clients + publishing_clients: + if c._debug_outstanding: + return False + if self.the_introducer._debug_outstanding: return False - return True - d = self.poll(_wait_for_all_connections) + return True + return self.poll(_idle) + + d = defer.succeed(None) + d.addCallback(_wait_for_connected) + d.addCallback(_wait_for_expected_announcements) + d.addCallback(_wait_until_idle) def _check1(res): log.msg("doing _check1") - dc = introducer._debug_counts - self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS) - self.failUnlessEqual(dc["inbound_duplicate"], 0) + dc = self.the_introducer._debug_counts + if server_version == V1: + # each storage server publishes a record, and (after its + # 'subscribe' has been ACKed) also publishes a "stub_client". + # The non-storage client (which subscribes) also publishes a + # stub_client. There is also one "boring" service. The number + # of messages is higher, because the stub_clients aren't + # published until after we get the 'subscribe' ack (since we + # don't realize that we're dealing with a v1 server [which + # needs stub_clients] until then), and the act of publishing + # the stub_client causes us to re-send all previous + # announcements. + self.failUnlessEqual(dc["inbound_message"] - dc["inbound_duplicate"], + NUM_STORAGE + NUM_CLIENTS + 1) + else: + # each storage server publishes a record. There is also one + # "stub_client" and one "boring" + self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+2) + self.failUnlessEqual(dc["inbound_duplicate"], 0) self.failUnlessEqual(dc["inbound_update"], 0) - self.failUnless(dc["outbound_message"]) + self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) + # the number of outbound messages is tricky.. I think it depends + # upon a race between the publish and the subscribe messages. 
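The announcement-counter assertions in _check1 follow from simple fan-out arithmetic: every subscriber is told about every storage record, while the number of wire messages varies with batching and the publish/subscribe race noted above. Spelled out with the constants used here:

    NUM_STORAGE = 5   # clients publishing a "storage" record
    NUM_CLIENTS = 6   # clients subscribed to "storage"
    # one announcement per (record, subscriber) pair:
    assert NUM_STORAGE * NUM_CLIENTS == 30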
+ self.failUnless(dc["outbound_message"] > 0) + # each client subscribes to "storage", and each server publishes + self.failUnlessEqual(dc["outbound_announcements"], + NUM_STORAGE*NUM_CLIENTS) - for c in clients: - self.failUnless(c.connected_to_introducer()) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"]) self.failUnlessEqual(cdc["inbound_announcement"], - NUM_SERVERS) + NUM_STORAGE) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], 0) self.failUnlessEqual(cdc["update"], 0) self.failUnlessEqual(cdc["new_announcement"], - NUM_SERVERS) + NUM_STORAGE) anns = received_announcements[c] - self.failUnlessEqual(len(anns), NUM_SERVERS) + self.failUnlessEqual(len(anns), NUM_STORAGE) - nodeid0 = b32decode(tubs[clients[0]].tubID.upper()) - ann_d = anns[nodeid0] - nick = ann_d["nickname"] + nodeid0 = tubs[clients[0]].tubID + ann = anns[nodeid0] + nick = ann["nickname"] self.failUnlessEqual(type(nick), unicode) - self.failUnlessEqual(nick, u"nickname-0") - for c in publishing_clients: - cdc = c._debug_counts - self.failUnlessEqual(cdc["outbound_message"], 1) + self.failUnlessEqual(nick, NICKNAME % "0") + if server_version == V1: + for c in publishing_clients: + cdc = c._debug_counts + expected = 1 # storage + if c is clients[2]: + expected += 1 # boring + if c is not clients[0]: + # the v2 client tries to call publish_v2, which fails + # because the server is v1. It then re-sends + # everything it has so far, plus a stub_client record + expected = 2*expected + 1 + if c is clients[0]: + # we always tell v1 client to send stub_client + expected += 1 + self.failUnlessEqual(cdc["outbound_message"], expected) + else: + for c in publishing_clients: + cdc = c._debug_counts + expected = 1 + if c in [clients[0], # stub_client + clients[2], # boring + ]: + expected = 2 + self.failUnlessEqual(cdc["outbound_message"], expected) + # now check the web status, make sure it renders without error + ir = introweb.IntroducerRoot(self.parent) + self.parent.nodeid = "NODEID" + text = ir.renderSynchronously().decode("utf-8") + self.failUnlessIn(NICKNAME % "0", text) # the v1 client + self.failUnlessIn(NICKNAME % "1", text) # a v2 client + for i in range(NUM_STORAGE): + self.failUnlessIn(printable_serverids[i], text, + (i,printable_serverids[i],text)) + # make sure there isn't a double-base32ed string too + self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text, + (i,printable_serverids[i],text)) + log.msg("_check1 done") d.addCallback(_check1) # force an introducer reconnect, by shutting down the Tub it's using @@ -196,67 +743,54 @@ d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) - def _wait_for_introducer_loss(): - for c in clients: - if c.connected_to_introducer(): - return False - return True - d.addCallback(lambda res: self.poll(_wait_for_introducer_loss)) + def _wait_for_introducer_loss(ign): + def _introducer_lost(): + for c in clients: + if c.connected_to_introducer(): + return False + return True + return self.poll(_introducer_lost) + d.addCallback(_wait_for_introducer_loss) def _restart_introducer_tub(_ign): log.msg("restarting introducer's Tub") - - dc = introducer._debug_counts - self.expected_count = dc["inbound_message"] + NUM_SERVERS - self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS+1 - introducer._debug0 = dc["outbound_message"] - for c in subscribing_clients: - cdc = c._debug_counts - c._debug0 = 
cdc["inbound_message"] - + # reset counters + for i in range(NUM_CLIENTS): + c = subscribing_clients[i] + for k in c._debug_counts: + c._debug_counts[k] = 0 + for k in self.the_introducer._debug_counts: + self.the_introducer._debug_counts[k] = 0 + expected_announcements[i] += 1 # new 'storage' for everyone self.create_tub(self.central_portnum) - newfurl = self.central_tub.registerReference(introducer, + newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer_tub) - def _wait_for_introducer_reconnect(): - # wait until: - # all clients are connected - # the introducer has received publish messages from all of them - # the introducer has received subscribe messages from all of them - # the introducer has sent (duplicate) announcements to all of them - # all clients have received (duplicate) announcements - dc = introducer._debug_counts - for c in clients: - if not c.connected_to_introducer(): - return False - if dc["inbound_message"] < self.expected_count: - return False - if dc["inbound_subscribe"] < self.expected_subscribe_count: - return False - for c in subscribing_clients: - cdc = c._debug_counts - if cdc["inbound_message"] < c._debug0+1: - return False - return True - d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect)) + d.addCallback(_wait_for_connected) + d.addCallback(_wait_for_expected_announcements) + d.addCallback(_wait_until_idle) + d.addCallback(lambda _ign: log.msg(" reconnected")) + # TODO: publish something while the introducer is offline, then + # confirm it gets delivered when the connection is reestablished def _check2(res): log.msg("doing _check2") # assert that the introducer sent out new messages, one per # subscriber - dc = introducer._debug_counts - self.failUnlessEqual(dc["inbound_message"], 2*NUM_SERVERS) - self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS) - self.failUnlessEqual(dc["inbound_update"], 0) - self.failUnlessEqual(dc["outbound_message"], - introducer._debug0 + len(subscribing_clients)) - for c in clients: - self.failUnless(c.connected_to_introducer()) + dc = self.the_introducer._debug_counts + self.failUnlessEqual(dc["outbound_announcements"], + NUM_STORAGE*NUM_CLIENTS) + self.failUnless(dc["outbound_message"] > 0) + self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts - self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS) + self.failUnlessEqual(cdc["inbound_message"], 1) + self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) + self.failUnlessEqual(cdc["new_announcement"], 0) + self.failUnlessEqual(cdc["wrong_service"], 0) + self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check2) # Then force an introducer restart, by shutting down the Tub, @@ -267,71 +801,247 @@ d.addCallback(lambda _ign: log.msg("shutting down introducer")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) - d.addCallback(lambda res: self.poll(_wait_for_introducer_loss)) + d.addCallback(_wait_for_introducer_loss) + d.addCallback(lambda _ign: log.msg("introducer lost")) def _restart_introducer(_ign): log.msg("restarting introducer") self.create_tub(self.central_portnum) - - for c in subscribing_clients: - # record some counters for later comparison. Stash the values - # on the client itself, because I'm lazy. 
- cdc = c._debug_counts - c._debug1 = cdc["inbound_announcement"] - c._debug2 = cdc["inbound_message"] - c._debug3 = cdc["new_announcement"] - newintroducer = create_introducer() - self.expected_message_count = NUM_SERVERS - self.expected_announcement_count = NUM_SERVERS*len(subscribing_clients) - self.expected_subscribe_count = len(subscribing_clients) - newfurl = self.central_tub.registerReference(newintroducer, + # reset counters + for i in range(NUM_CLIENTS): + c = subscribing_clients[i] + for k in c._debug_counts: + c._debug_counts[k] = 0 + expected_announcements[i] += 1 # new 'storage' for everyone + if server_version == V1: + introducer = old.IntroducerService_v1() + else: + introducer = IntroducerService() + self.the_introducer = introducer + newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer) - def _wait_for_introducer_reconnect2(): - # wait until: - # all clients are connected - # the introducer has received publish messages from all of them - # the introducer has received subscribe messages from all of them - # the introducer has sent announcements for everybody to everybody - # all clients have received all the (duplicate) announcements - # at that point, the system should be quiescent - dc = introducer._debug_counts - for c in clients: - if not c.connected_to_introducer(): - return False - if dc["inbound_message"] < self.expected_message_count: - return False - if dc["outbound_announcements"] < self.expected_announcement_count: - return False - if dc["inbound_subscribe"] < self.expected_subscribe_count: - return False - for c in subscribing_clients: - cdc = c._debug_counts - if cdc["inbound_announcement"] < c._debug1+NUM_SERVERS: - return False - return True - d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2)) + + d.addCallback(_wait_for_connected) + d.addCallback(_wait_for_expected_announcements) + d.addCallback(_wait_until_idle) def _check3(res): log.msg("doing _check3") - for c in clients: - self.failUnless(c.connected_to_introducer()) + dc = self.the_introducer._debug_counts + self.failUnlessEqual(dc["outbound_announcements"], + NUM_STORAGE*NUM_CLIENTS) + self.failUnless(dc["outbound_message"] > 0) + self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts - self.failUnless(cdc["inbound_announcement"] > c._debug1) - self.failUnless(cdc["inbound_message"] > c._debug2) - # there should have been no new announcements - self.failUnlessEqual(cdc["new_announcement"], c._debug3) - # and the right number of duplicate ones. 
There were - # NUM_SERVERS from the servertub restart, and there should be - # another NUM_SERVERS now - self.failUnlessEqual(cdc["duplicate_announcement"], - 2*NUM_SERVERS) + self.failUnless(cdc["inbound_message"] > 0) + self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) + self.failUnlessEqual(cdc["new_announcement"], 0) + self.failUnlessEqual(cdc["wrong_service"], 0) + self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check3) return d + + def test_system_v2_server(self): + self.basedir = "introducer/SystemTest/system_v2_server" + os.makedirs(self.basedir) + return self.do_system_test(V2) + test_system_v2_server.timeout = 480 + # occasionally takes longer than 350s on "draco" + + def test_system_v1_server(self): + self.basedir = "introducer/SystemTest/system_v1_server" + os.makedirs(self.basedir) + return self.do_system_test(V1) + test_system_v1_server.timeout = 480 + # occasionally takes longer than 350s on "draco" + +class FakeRemoteReference: + def notifyOnDisconnect(self, *args, **kwargs): pass + def getRemoteTubID(self): return "62ubehyunnyhzs7r6vdonnm2hpi52w6y" + def getLocationHints(self): return [("ipv4", "here.example.com", "1234"), + ("ipv4", "there.example.com", "2345")] + def getPeer(self): return address.IPv4Address("TCP", "remote.example.com", + 3456) + +class ClientInfo(unittest.TestCase): + def test_client_v2(self): + introducer = IntroducerService() + tub = introducer_furl = None + app_versions = {"whizzy": "fizzy"} + client_v2 = IntroducerClient(tub, introducer_furl, NICKNAME % u"v2", + "my_version", "oldest", app_versions, + fakeseq) + #furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" + #ann_s = make_ann_t(client_v2, furl1, None, 10) + #introducer.remote_publish_v2(ann_s, Referenceable()) + subscriber = FakeRemoteReference() + introducer.remote_subscribe_v2(subscriber, "storage", + client_v2._my_subscriber_info) + subs = introducer.get_subscribers() + self.failUnlessEqual(len(subs), 1) + s0 = subs[0] + self.failUnlessEqual(s0.service_name, "storage") + self.failUnlessEqual(s0.app_versions, app_versions) + self.failUnlessEqual(s0.nickname, NICKNAME % u"v2") + self.failUnlessEqual(s0.version, "my_version") + + def test_client_v1(self): + introducer = IntroducerService() + subscriber = FakeRemoteReference() + introducer.remote_subscribe(subscriber, "storage") + # the v1 subscribe interface had no subscriber_info: that was usually + # sent in a separate stub_client pseudo-announcement + subs = introducer.get_subscribers() + self.failUnlessEqual(len(subs), 1) + s0 = subs[0] + self.failUnlessEqual(s0.nickname, u"?") # not known yet + self.failUnlessEqual(s0.service_name, "storage") + + # now submit the stub_client announcement + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" + ann = (furl1, "stub_client", "RIStubClient", + (NICKNAME % u"v1").encode("utf-8"), "my_version", "oldest") + introducer.remote_publish(ann) + # the server should correlate the two + subs = introducer.get_subscribers() + self.failUnlessEqual(len(subs), 1) + s0 = subs[0] + self.failUnlessEqual(s0.service_name, "storage") + # v1 announcements do not contain app-versions + self.failUnlessEqual(s0.app_versions, {}) + self.failUnlessEqual(s0.nickname, NICKNAME % u"v1") + self.failUnlessEqual(s0.version, "my_version") + + # a subscription that arrives after the stub_client announcement + # should be correlated too + subscriber2 = FakeRemoteReference() + introducer.remote_subscribe(subscriber2, "thing2") + + subs = 
introducer.get_subscribers() + self.failUnlessEqual(len(subs), 2) + s0 = [s for s in subs if s.service_name == "thing2"][0] + # v1 announcements do not contain app-versions + self.failUnlessEqual(s0.app_versions, {}) + self.failUnlessEqual(s0.nickname, NICKNAME % u"v1") + self.failUnlessEqual(s0.version, "my_version") + +class Announcements(unittest.TestCase): + def test_client_v2_unsigned(self): + introducer = IntroducerService() + tub = introducer_furl = None + app_versions = {"whizzy": "fizzy"} + client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2", + "my_version", "oldest", app_versions, + fakeseq) + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" + tubid = "62ubehyunnyhzs7r6vdonnm2hpi52w6y" + ann_s0 = make_ann_t(client_v2, furl1, None, 10) + canary0 = Referenceable() + introducer.remote_publish_v2(ann_s0, canary0) + a = introducer.get_announcements() + self.failUnlessEqual(len(a), 1) + self.failUnlessIdentical(a[0].canary, canary0) + self.failUnlessEqual(a[0].index, ("storage", None, tubid)) + self.failUnlessEqual(a[0].announcement["app-versions"], app_versions) + self.failUnlessEqual(a[0].nickname, u"nick-v2") + self.failUnlessEqual(a[0].service_name, "storage") + self.failUnlessEqual(a[0].version, "my_version") + self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"], furl1) + + def test_client_v2_signed(self): + introducer = IntroducerService() + tub = introducer_furl = None + app_versions = {"whizzy": "fizzy"} + client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2", + "my_version", "oldest", app_versions, + fakeseq) + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" + sk_s, vk_s = keyutil.make_keypair() + sk, _ignored = keyutil.parse_privkey(sk_s) + pks = keyutil.remove_prefix(vk_s, "pub-") + ann_t0 = make_ann_t(client_v2, furl1, sk, 10) + canary0 = Referenceable() + introducer.remote_publish_v2(ann_t0, canary0) + a = introducer.get_announcements() + self.failUnlessEqual(len(a), 1) + self.failUnlessIdentical(a[0].canary, canary0) + self.failUnlessEqual(a[0].index, ("storage", pks, None)) + self.failUnlessEqual(a[0].announcement["app-versions"], app_versions) + self.failUnlessEqual(a[0].nickname, u"nick-v2") + self.failUnlessEqual(a[0].service_name, "storage") + self.failUnlessEqual(a[0].version, "my_version") + self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"], furl1) + + def test_client_v1(self): + introducer = IntroducerService() + + furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" + tubid = "62ubehyunnyhzs7r6vdonnm2hpi52w6y" + ann = (furl1, "storage", "RIStorage", + u"nick-v1".encode("utf-8"), "my_version", "oldest") + introducer.remote_publish(ann) + + a = introducer.get_announcements() + self.failUnlessEqual(len(a), 1) + self.failUnlessEqual(a[0].index, ("storage", None, tubid)) + self.failUnlessEqual(a[0].canary, None) + self.failUnlessEqual(a[0].announcement["app-versions"], {}) + self.failUnlessEqual(a[0].nickname, u"nick-v1".encode("utf-8")) + self.failUnlessEqual(a[0].service_name, "storage") + self.failUnlessEqual(a[0].version, "my_version") + self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"], furl1) + +class ClientSeqnums(unittest.TestCase): + def test_client(self): + basedir = "introducer/ClientSeqnums/test_client" + fileutil.make_dirs(basedir) + f = open(os.path.join(basedir, "tahoe.cfg"), "w") + f.write("[client]\n") + f.write("introducer.furl = nope\n") + f.close() + c = TahoeClient(basedir) + ic = c.introducer_client + outbound = 
ic._outbound_announcements + published = ic._published_announcements + def read_seqnum(): + f = open(os.path.join(basedir, "announcement-seqnum")) + seqnum = f.read().strip() + f.close() + return int(seqnum) + + ic.publish("sA", {"key": "value1"}, c._node_key) + self.failUnlessEqual(read_seqnum(), 1) + self.failUnless("sA" in outbound) + self.failUnlessEqual(outbound["sA"]["seqnum"], 1) + nonce1 = outbound["sA"]["nonce"] + self.failUnless(isinstance(nonce1, str)) + self.failUnlessEqual(simplejson.loads(published["sA"][0]), + outbound["sA"]) + # [1] is the signature, [2] is the pubkey + + # publishing a second service causes both services to be + # re-published, with the next higher sequence number + ic.publish("sB", {"key": "value2"}, c._node_key) + self.failUnlessEqual(read_seqnum(), 2) + self.failUnless("sB" in outbound) + self.failUnlessEqual(outbound["sB"]["seqnum"], 2) + self.failUnless("sA" in outbound) + self.failUnlessEqual(outbound["sA"]["seqnum"], 2) + nonce2 = outbound["sA"]["nonce"] + self.failUnless(isinstance(nonce2, str)) + self.failIfEqual(nonce1, nonce2) + self.failUnlessEqual(simplejson.loads(published["sA"][0]), + outbound["sA"]) + self.failUnlessEqual(simplejson.loads(published["sB"][0]), + outbound["sB"]) + + + class TooNewServer(IntroducerService): VERSION = { "http://allmydata.org/tahoe/protocols/introducer/v999": { }, @@ -359,10 +1069,11 @@ tub.setLocation("localhost:%d" % portnum) c = IntroducerClient(tub, self.introducer_furl, - u"nickname-client", "version", "oldest") + u"nickname-client", "version", "oldest", {}, + fakeseq) announcements = {} - def got(serverid, ann_d): - announcements[serverid] = ann_d + def got(key_s, ann): + announcements[key_s] = ann c.subscribe_to("storage", got) c.setServiceParent(self.parent) @@ -374,7 +1085,8 @@ d = self.poll(_got_bad) def _done(res): self.failUnless(c._introducer_error) - self.failUnless(c._introducer_error.check(InsufficientVersionError)) + self.failUnless(c._introducer_error.check(InsufficientVersionError), + c._introducer_error) d.addCallback(_done) return d @@ -388,3 +1100,44 @@ nodeid = b32decode(m.group(1).upper()) self.failUnlessEqual(nodeid, "\x9fM\xf2\x19\xcckU0\xbf\x03\r\x10\x99\xfb&\x9b-\xc7A\x1d") +class Signatures(unittest.TestCase): + def test_sign(self): + ann = {"key1": "value1"} + sk_s,vk_s = keyutil.make_keypair() + sk,ignored = keyutil.parse_privkey(sk_s) + ann_t = sign_to_foolscap(ann, sk) + (msg, sig, key) = ann_t + self.failUnlessEqual(type(msg), type("".encode("utf-8"))) # bytes + self.failUnlessEqual(simplejson.loads(msg.decode("utf-8")), ann) + self.failUnless(sig.startswith("v0-")) + self.failUnless(key.startswith("v0-")) + (ann2,key2) = unsign_from_foolscap(ann_t) + self.failUnlessEqual(ann2, ann) + self.failUnlessEqual("pub-"+key2, vk_s) + + # bad signature + bad_ann = {"key1": "value2"} + bad_msg = simplejson.dumps(bad_ann).encode("utf-8") + self.failUnlessRaises(keyutil.BadSignatureError, + unsign_from_foolscap, (bad_msg,sig,key)) + # sneaky bad signature should be ignored + (ann2,key2) = unsign_from_foolscap( (bad_msg,None,key) ) + self.failUnlessEqual(key2, None) + self.failUnlessEqual(ann2, bad_ann) + + # unrecognized signatures + self.failUnlessRaises(UnknownKeyError, + unsign_from_foolscap, (bad_msg,"v999-sig",key)) + self.failUnlessRaises(UnknownKeyError, + unsign_from_foolscap, (bad_msg,sig,"v999-key")) + + +# add tests of StorageFarmBroker: if it receives duplicate announcements, it +# should leave the Reconnector in place, also if it receives +# same-FURL-different-misc, but if it 
receives same-nodeid-different-FURL, it +# should tear down the Reconnector and make a new one. This behavior used to +# live in the IntroducerClient, and thus used to be tested by test_introducer + +# copying more tests from old branch: + +# then also add Upgrade test diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_mutable.py tahoe-lafs-1.10.0/src/allmydata/test/test_mutable.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_mutable.py 2012-07-03 16:32:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_mutable.py 2013-09-03 15:38:27.000000000 +0000 @@ -21,7 +21,7 @@ from allmydata.scripts import debug from allmydata.mutable.filenode import MutableFileNode, BackoffAgent -from allmydata.mutable.common import ResponseCache, \ +from allmydata.mutable.common import \ MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \ NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \ NotEnoughServersError, CorruptShareError @@ -234,7 +234,9 @@ storage_broker = StorageFarmBroker(None, True) for peerid in peerids: fss = FakeStorageServer(peerid, s) - storage_broker.test_add_rref(peerid, fss) + ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid), + "permutation-seed-base32": base32.b2a(peerid) } + storage_broker.test_add_rref(peerid, fss, ann) return storage_broker def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE): @@ -482,7 +484,7 @@ dumped = servermap.dump(StringIO()) self.failUnlessIn("3-of-10", dumped.getvalue()) d.addCallback(_then) - # Now overwrite the contents with some new contents. We want + # Now overwrite the contents with some new contents. We want # to make them big enough to force the file to be uploaded # in more than one segment. big_contents = "contents1" * 100000 # about 900 KiB @@ -497,7 +499,7 @@ # before, they need to be big enough to force multiple # segments, so that we make the downloader deal with # multiple segments. - bigger_contents = "contents2" * 1000000 # about 9MiB + bigger_contents = "contents2" * 1000000 # about 9MiB bigger_contents_uploadable = MutableData(bigger_contents) d.addCallback(lambda ignored: n.overwrite(bigger_contents_uploadable)) @@ -637,25 +639,6 @@ d.addCallback(_created) return d - - def test_response_cache_memory_leak(self): - d = self.nodemaker.create_mutable_file("contents") - def _created(n): - d = n.download_best_version() - d.addCallback(lambda res: self.failUnlessEqual(res, "contents")) - d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache))) - - def _check_cache(expected): - # The total size of cache entries should not increase on the second download; - # in fact the cache contents should be identical. - d2 = n.download_best_version() - d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected)) - return d2 - d.addCallback(lambda ign: _check_cache(repr(n._cache.cache))) - return d - d.addCallback(_created) - return d - def test_create_with_initial_contents_function(self): data = "initial contents" def _make_contents(n): @@ -1499,7 +1482,7 @@ def test_corrupt_all_encprivkey_late(self): - # this should work for the same reason as above, but we corrupt + # this should work for the same reason as above, but we corrupt # after the servermap update to exercise the error handling # code. 
# We need to remove the privkey from the node, or the retrieve @@ -1526,15 +1509,6 @@ corrupt_early=False, failure_checker=_check) - def test_corrupt_all_block_hash_tree_late(self): - def _check(res): - f = res[0] - self.failUnless(f.check(NotEnoughSharesError)) - return self._test_corrupt_all("block_hash_tree", - "block hash tree failure", - corrupt_early=False, - failure_checker=_check) - def test_corrupt_all_block_late(self): def _check(res): @@ -1616,17 +1590,20 @@ d.addCallback(lambda ignored: self._test_corrupt_all(("block_hash_tree", 12 * 32), "block hash tree failure", - corrupt_early=False, + corrupt_early=True, should_succeed=False)) return d def test_corrupt_mdmf_block_hash_tree_late(self): + # Note - there is no SDMF counterpart to this test, as the SDMF + # files are guaranteed to have exactly one block, and therefore + # the block hash tree fits within the initial read (#1240). d = self.publish_mdmf() d.addCallback(lambda ignored: self._test_corrupt_all(("block_hash_tree", 12 * 32), "block hash tree failure", - corrupt_early=True, + corrupt_early=False, should_succeed=False)) return d @@ -1658,14 +1635,14 @@ return r def check_expected_failure(self, r, expected_exception, substring, where): - for (peerid, storage_index, shnum, f) in r.problems: + for (peerid, storage_index, shnum, f) in r.get_share_problems(): if f.check(expected_exception): self.failUnless(substring in str(f), "%s: substring '%s' not in '%s'" % (where, substring, str(f))) return self.fail("%s: didn't see expected exception %s in problems %s" % - (where, expected_exception, r.problems)) + (where, expected_exception, r.get_share_problems())) class Checker(unittest.TestCase, CheckerMixin, PublishMixin): @@ -2231,9 +2208,9 @@ # then mix up the shares, to make sure that download survives seeing # a variety of encodings. This is actually kind of tricky to set up. 
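The share-mixing that this comment calls tricky amounts to publishing the same mutable file several times with different k-of-n encodings, then deciding per share number which publish the servers will serve from. A schematic of the idea, where publish_with_encoding() is a hypothetical stand-in for the multi-step publish the test actually performs:

    encodings = [(3, 10), (4, 9), (4, 7)]
    sharesets = [publish_with_encoding(k, n) for (k, n) in encodings]
    # which publish (0, 1, or 2) should serve each of the ten share numbers
    places = {0: 0, 1: 1, 2: 0, 3: 2, 4: 0, 5: 1, 6: 0, 7: 0, 8: 1, 9: 0}
    mixed = dict((shnum, sharesets[which][shnum])
                 for (shnum, which) in places.items()
                 if shnum in sharesets[which])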
- contents1 = "Contents for encoding 1 (3-of-10) go here" - contents2 = "Contents for encoding 2 (4-of-9) go here" - contents3 = "Contents for encoding 3 (4-of-7) go here" + contents1 = "Contents for encoding 1 (3-of-10) go here"*1000 + contents2 = "Contents for encoding 2 (4-of-9) go here"*1000 + contents3 = "Contents for encoding 3 (4-of-7) go here"*1000 # we make a retrieval object that doesn't know what encoding # parameters to use @@ -2401,39 +2378,6 @@ return d -class Utils(unittest.TestCase): - def test_cache(self): - c = ResponseCache() - # xdata = base62.b2a(os.urandom(100))[:100] - xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l" - ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs" - c.add("v1", 1, 0, xdata) - c.add("v1", 1, 2000, ydata) - self.failUnlessEqual(c.read("v2", 1, 10, 11), None) - self.failUnlessEqual(c.read("v1", 2, 10, 11), None) - self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10]) - self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:]) - self.failUnlessEqual(c.read("v1", 1, 300, 10), None) - self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55]) - self.failUnlessEqual(c.read("v1", 1, 0, 101), None) - self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100]) - self.failUnlessEqual(c.read("v1", 1, 100, 1), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 9), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 10), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 11), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 15), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 19), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 20), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 21), None) - self.failUnlessEqual(c.read("v1", 1, 1990, 25), None) - self.failUnlessEqual(c.read("v1", 1, 1999, 25), None) - - # test joining fragments - c = ResponseCache() - c.add("v1", 1, 0, xdata[:10]) - c.add("v1", 1, 10, xdata[10:20]) - self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20]) - class Exceptions(unittest.TestCase): def test_repr(self): nmde = NeedMoreDataError(100, 50, 100) @@ -2441,6 +2385,7 @@ ucwe = UncoordinatedWriteError() self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe)) + class SameKeyGenerator: def __init__(self, pubkey, privkey): self.pubkey = pubkey @@ -2512,7 +2457,7 @@ self.basedir = "mutable/Problems/test_retrieve_surprise" self.set_up_grid() nm = self.g.clients[0].nodemaker - d = nm.create_mutable_file(MutableData("contents 1")) + d = nm.create_mutable_file(MutableData("contents 1"*4000)) def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_READ)) @@ -2526,7 +2471,6 @@ # now attempt to retrieve the old version with the old servermap. # This will look like someone has changed the file since we # updated the servermap. 
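Concretely, the stale-servermap read described in this comment can be reproduced with the public mutable-file API. A sketch, assuming a MutableFileNode n as elsewhere in this file; the actual test wraps the final read in shouldFail(NotEnoughSharesError, ...):

    from allmydata.mutable.common import MODE_READ
    from allmydata.mutable.publish import MutableData

    d = n.get_servermap(MODE_READ)
    def _overwrite_then_read(smap):
        version = smap.best_recoverable_version()
        d2 = n.overwrite(MutableData("contents 2" * 4000))  # concurrent change
        # reading the old version through the now-stale map fails, because
        # the shares it points at have been superseded on the servers
        d2.addCallback(lambda ign: n.download_version(smap, version))
        return d2
    d.addCallback(_overwrite_then_read)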
- d.addCallback(lambda res: n._cache._clear()) d.addCallback(lambda res: log.msg("starting doomed read")) d.addCallback(lambda res: self.shouldFail(NotEnoughSharesError, diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_no_network.py tahoe-lafs-1.10.0/src/allmydata/test/test_no_network.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_no_network.py 2012-05-14 02:07:31.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_no_network.py 2013-09-03 15:38:27.000000000 +0000 @@ -31,7 +31,7 @@ data = Data(DATA, "") d = c0.upload(data) def _uploaded(res): - n = c0.create_node_from_uri(res.uri) + n = c0.create_node_from_uri(res.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(res): diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_node.py tahoe-lafs-1.10.0/src/allmydata/test/test_node.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_node.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_node.py 2013-09-03 15:38:27.000000000 +0000 @@ -8,7 +8,7 @@ from foolscap.api import flushEventualQueue from twisted.application import service -from allmydata.node import Node, formatTimeTahoeStyle +from allmydata.node import Node, formatTimeTahoeStyle, MissingConfigEntry from allmydata.util import fileutil import allmydata.test.common_util as testutil @@ -87,6 +87,34 @@ u"\u2621")) return d + def test_private_config(self): + basedir = "test_node/test_private_config" + privdir = os.path.join(basedir, "private") + fileutil.make_dirs(privdir) + f = open(os.path.join(privdir, 'already'), 'wt') + f.write("secret") + f.close() + + n = TestNode(basedir) + self.failUnlessEqual(n.get_private_config("already"), "secret") + self.failUnlessEqual(n.get_private_config("not", "default"), "default") + self.failUnlessRaises(MissingConfigEntry, n.get_private_config, "not") + value = n.get_or_create_private_config("new", "start") + self.failUnlessEqual(value, "start") + self.failUnlessEqual(n.get_private_config("new"), "start") + counter = [] + def make_newer(): + counter.append("called") + return "newer" + value = n.get_or_create_private_config("newer", make_newer) + self.failUnlessEqual(len(counter), 1) + self.failUnlessEqual(value, "newer") + self.failUnlessEqual(n.get_private_config("newer"), "newer") + + value = n.get_or_create_private_config("newer", make_newer) + self.failUnlessEqual(len(counter), 1) # don't call unless necessary + self.failUnlessEqual(value, "newer") + def test_timestamp(self): # this modified logger doesn't seem to get used during the tests, # probably because we don't modify the LogObserver that trial diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_provisioning.py tahoe-lafs-1.10.0/src/allmydata/test/test_provisioning.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_provisioning.py 2012-05-14 02:07:31.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_provisioning.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ - -from twisted.trial import unittest -from allmydata import provisioning -ReliabilityModel = None -try: - from allmydata.reliability import ReliabilityModel -except ImportError: - pass # might not be importable, since it needs NumPy - -from nevow import inevow -from zope.interface import implements - -class MyRequest: - implements(inevow.IRequest) - pass - -class Provisioning(unittest.TestCase): - def getarg(self, name, astype=int): - if name in self.fields: - return astype(self.fields[name]) - return None - - def test_load(self): - pt = provisioning.ProvisioningTool() - self.fields = {} - #r = MyRequest() - 
#r.fields = self.fields - #ctx = RequestContext() - #unfilled = pt.renderSynchronously(ctx) - lots_of_stan = pt.do_forms(self.getarg) - self.failUnless(lots_of_stan is not None) - - self.fields = {'filled': True, - "num_users": 50e3, - "files_per_user": 1000, - "space_per_user": 1e9, - "sharing_ratio": 1.0, - "encoding_parameters": "3-of-10-5", - "num_servers": 30, - "ownership_mode": "A", - "download_rate": 100, - "upload_rate": 10, - "delete_rate": 10, - "lease_timer": 7, - } - #filled = pt.renderSynchronously(ctx) - more_stan = pt.do_forms(self.getarg) - self.failUnless(more_stan is not None) - - # trigger the wraparound configuration - self.fields["num_servers"] = 5 - #filled = pt.renderSynchronously(ctx) - more_stan = pt.do_forms(self.getarg) - - # and other ownership modes - self.fields["ownership_mode"] = "B" - more_stan = pt.do_forms(self.getarg) - self.fields["ownership_mode"] = "E" - more_stan = pt.do_forms(self.getarg) - - def test_provisioning_math(self): - self.failUnlessEqual(provisioning.binomial(10, 0), 1) - self.failUnlessEqual(provisioning.binomial(10, 1), 10) - self.failUnlessEqual(provisioning.binomial(10, 2), 45) - self.failUnlessEqual(provisioning.binomial(10, 9), 10) - self.failUnlessEqual(provisioning.binomial(10, 10), 1) - -DAY=24*60*60 -MONTH=31*DAY -YEAR=365*DAY - -class Reliability(unittest.TestCase): - def test_basic(self): - if ReliabilityModel is None: - raise unittest.SkipTest("reliability model requires NumPy") - - # test that numpy math works the way I think it does - import numpy - decay = numpy.matrix([[1,0,0], - [.1,.9,0], - [.01,.09,.9], - ]) - start = numpy.array([0,0,1]) - g2 = (start * decay).A[0] - self.failUnlessEqual(repr(g2), repr(numpy.array([.01,.09,.9]))) - g3 = (g2 * decay).A[0] - self.failUnlessEqual(repr(g3), repr(numpy.array([.028,.162,.81]))) - - # and the dot product - recoverable = numpy.array([0,1,1]) - P_recoverable_g2 = numpy.dot(g2, recoverable) - self.failUnlessAlmostEqual(P_recoverable_g2, .9 + .09) - P_recoverable_g3 = numpy.dot(g3, recoverable) - self.failUnlessAlmostEqual(P_recoverable_g3, .81 + .162) - - r = ReliabilityModel.run(delta=100000, - report_period=3*MONTH, - report_span=5*YEAR) - self.failUnlessEqual(len(r.samples), 20) - - last_row = r.samples[-1] - #print last_row - (when, unmaintained_shareprobs, maintained_shareprobs, - P_repaired_last_check_period, - cumulative_number_of_repairs, - cumulative_number_of_new_shares, - P_dead_unmaintained, P_dead_maintained) = last_row - self.failUnless(isinstance(P_repaired_last_check_period, float)) - self.failUnless(isinstance(P_dead_unmaintained, float)) - self.failUnless(isinstance(P_dead_maintained, float)) - self.failUnlessAlmostEqual(P_dead_unmaintained, 0.033591004555395272) - self.failUnlessAlmostEqual(P_dead_maintained, 3.2983995819177542e-08) - diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_repairer.py tahoe-lafs-1.10.0/src/allmydata/test/test_repairer.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_repairer.py 2012-05-17 00:16:42.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_repairer.py 2013-09-03 15:38:27.000000000 +0000 @@ -62,9 +62,9 @@ c0.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12 d = c0.upload(upload.Data(common.TEST_DATA, convergence="")) def _stash_uri(ur): - self.uri = ur.uri - self.c0_filenode = c0.create_node_from_uri(ur.uri) - self.c1_filenode = c1.create_node_from_uri(ur.uri) + self.uri = ur.get_uri() + self.c0_filenode = c0.create_node_from_uri(ur.get_uri()) + self.c1_filenode = c1.create_node_from_uri(ur.get_uri()) 
d.addCallback(_stash_uri) return d @@ -117,7 +117,7 @@ judgement(vr) except unittest.FailTest, e: # FailTest just uses e.args[0] == str - new_arg = str(e.args[0]) + "\nvr.data is: " + str(vr.get_data()) + new_arg = str(e.args[0]) + "\nvr.data is: " + str(vr.as_dict()) e.args = (new_arg,) raise d.addCallback(_check) @@ -127,15 +127,14 @@ """ Verify says the file is healthy when none of the shares have been touched in a way that matters. It doesn't use more than seven times as many reads as it needs.""" - self.failUnless(vr.is_healthy(), (vr, vr.is_healthy(), vr.get_data())) - data = vr.get_data() - self.failUnless(data['count-shares-good'] == 10, data) - self.failUnless(len(data['sharemap']) == 10, data) - self.failUnless(data['count-shares-needed'] == 3, data) - self.failUnless(data['count-shares-expected'] == 10, data) - self.failUnless(data['count-good-share-hosts'] == 10, data) - self.failUnless(len(data['servers-responding']) == 10, data) - self.failUnless(len(data['list-corrupt-shares']) == 0, data) + self.failUnless(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) + self.failUnlessEqual(vr.get_share_counter_good(), 10) + self.failUnlessEqual(len(vr.get_sharemap()), 10) + self.failUnlessEqual(vr.get_encoding_needed(), 3) + self.failUnlessEqual(vr.get_encoding_expected(), 10) + self.failUnlessEqual(vr.get_host_counter_good_shares(), 10) + self.failUnlessEqual(len(vr.get_servers_responding()), 10) + self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) def test_ok_no_corruption(self): self.basedir = "repairer/Verifier/ok_no_corruption" @@ -163,15 +162,14 @@ giving you the share data. Test that verifier handles these answers correctly. It doesn't use more than seven times as many reads as it needs.""" - self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.get_data())) - data = vr.get_data() - self.failUnless(data['count-shares-good'] == 9, data) - self.failUnless(len(data['sharemap']) == 9, data) - self.failUnless(data['count-shares-needed'] == 3, data) - self.failUnless(data['count-shares-expected'] == 10, data) - self.failUnless(data['count-good-share-hosts'] == 9, data) - self.failUnless(len(data['servers-responding']) == 9, data) - self.failUnless(len(data['list-corrupt-shares']) == 0, data) + self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) + self.failUnlessEqual(vr.get_share_counter_good(), 9) + self.failUnlessEqual(len(vr.get_sharemap()), 9) + self.failUnlessEqual(vr.get_encoding_needed(), 3) + self.failUnlessEqual(vr.get_encoding_expected(), 10) + self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) + self.failUnlessEqual(len(vr.get_servers_responding()), 9) + self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) def test_corrupt_file_verno(self): self.basedir = "repairer/Verifier/corrupt_file_verno" @@ -184,18 +182,15 @@ # offsets) to something larger than 2 will trigger a # ShareVersionIncompatible exception, which should be counted in # list-incompatible-shares, rather than list-corrupt-shares. 
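The split this comment describes comes down to classifying each bad share by the exception raised while checking it. A minimal illustration, with locally defined stand-ins for the real exception types (import paths deliberately omitted rather than guessed):

    class ShareVersionIncompatible(Exception):  # stand-in for the real type
        pass
    class CorruptShareError(Exception):         # stand-in for the real type
        pass

    def classify(check_share, share, results):
        try:
            check_share(share)
        except ShareVersionIncompatible:
            results["incompatible"].append(share)  # -> get_incompatible_shares()
        except CorruptShareError:
            results["corrupt"].append(share)       # -> get_corrupt_shares()
        else:
            results["good"].append(share)          # -> get_share_counter_good()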
- self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.get_data())) - data = vr.get_data() - self.failUnlessEqual(data['count-shares-good'], 9) - self.failUnlessEqual(len(data['sharemap']), 9) - self.failUnlessEqual(data['count-shares-needed'], 3) - self.failUnlessEqual(data['count-shares-expected'], 10) - self.failUnlessEqual(data['count-good-share-hosts'], 9) - self.failUnlessEqual(len(data['servers-responding']), 10) - self.failUnlessEqual(len(data['list-corrupt-shares']), 0) - self.failUnlessEqual(data['count-corrupt-shares'], 0) - self.failUnlessEqual(len(data['list-incompatible-shares']), 1) - self.failUnlessEqual(data['count-incompatible-shares'], 1) + self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) + self.failUnlessEqual(vr.get_share_counter_good(), 9) + self.failUnlessEqual(len(vr.get_sharemap()), 9) + self.failUnlessEqual(vr.get_encoding_needed(), 3) + self.failUnlessEqual(vr.get_encoding_expected(), 10) + self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) + self.failUnlessEqual(len(vr.get_servers_responding()), 10) + self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) + self.failUnlessEqual(len(vr.get_incompatible_shares()), 1) def test_corrupt_share_verno(self): self.basedir = "repairer/Verifier/corrupt_share_verno" @@ -206,18 +201,15 @@ # corruption of fields that the server does not check (which is most # of them), which will be detected by the client as it downloads # those shares. - self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.get_data())) - data = vr.get_data() - self.failUnlessEqual(data['count-shares-good'], 9) - self.failUnlessEqual(data['count-shares-needed'], 3) - self.failUnlessEqual(data['count-shares-expected'], 10) - self.failUnlessEqual(data['count-good-share-hosts'], 9) - self.failUnlessEqual(data['count-corrupt-shares'], 1) - self.failUnlessEqual(len(data['list-corrupt-shares']), 1) - self.failUnlessEqual(data['count-incompatible-shares'], 0) - self.failUnlessEqual(len(data['list-incompatible-shares']), 0) - self.failUnlessEqual(len(data['servers-responding']), 10) - self.failUnlessEqual(len(data['sharemap']), 9) + self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) + self.failUnlessEqual(vr.get_share_counter_good(), 9) + self.failUnlessEqual(vr.get_encoding_needed(), 3) + self.failUnlessEqual(vr.get_encoding_expected(), 10) + self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) + self.failUnlessEqual(len(vr.get_corrupt_shares()), 1) + self.failUnlessEqual(len(vr.get_incompatible_shares()), 0) + self.failUnlessEqual(len(vr.get_servers_responding()), 10) + self.failUnlessEqual(len(vr.get_sharemap()), 9) def test_corrupt_sharedata_offset(self): self.basedir = "repairer/Verifier/corrupt_sharedata_offset" @@ -498,7 +490,7 @@ self.failIfBigger(delta_reads, MAX_DELTA_READS) self.failIfBigger(delta_allocates, (DELTA_WRITES_PER_SHARE * 7)) self.failIf(pre.is_healthy()) - self.failUnless(post.is_healthy(), post.data) + self.failUnless(post.is_healthy(), post.as_dict()) # Make sure we really have 10 shares. 
shares = self.find_uri_shares(self.uri) @@ -690,9 +682,9 @@ c0.DEFAULT_ENCODING_PARAMETERS['n'] = 66 d = c0.upload(upload.Data(DATA, convergence="")) def _then(ur): - self.uri = ur.uri + self.uri = ur.get_uri() self.delete_shares_numbered(self.uri, [0]) - self.c0_filenode = c0.create_node_from_uri(ur.uri) + self.c0_filenode = c0.create_node_from_uri(ur.get_uri()) self._stash_counts() return self.c0_filenode.check_and_repair(Monitor()) d.addCallback(_then) @@ -724,7 +716,12 @@ # not respond to the pre-repair filecheck prr = rr.get_post_repair_results() expected = set(self.g.get_all_serverids()) - self.failUnlessEqual(expected, set(prr.data["servers-responding"])) + responding_set = frozenset([s.get_serverid() for s in prr.get_servers_responding()]) + self.failIf(expected - responding_set, expected - responding_set) + self.failIf(responding_set - expected, responding_set - expected) + self.failUnlessEqual(expected, + set([s.get_serverid() + for s in prr.get_servers_responding()])) d.addCallback(_check) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_runner.py tahoe-lafs-1.10.0/src/allmydata/test/test_runner.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_runner.py 2012-05-14 02:24:30.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_runner.py 2013-09-03 15:38:27.000000000 +0000 @@ -88,6 +88,10 @@ if os.path.basename(root_from_cwd) == 'src': root_from_cwd = os.path.dirname(root_from_cwd) + # This is needed if we are running in a temporary directory created by 'make tmpfstest'. + if os.path.basename(root_from_cwd).startswith('tmp'): + root_from_cwd = os.path.dirname(root_from_cwd) + same = (root_from_cwd == root_to_check) if not same: try: @@ -105,6 +109,10 @@ if os.path.basename(root_from_cwdu) == u'src': root_from_cwdu = os.path.dirname(root_from_cwdu) + # This is needed if we are running in a temporary directory created by 'make tmpfstest'. + if os.path.basename(root_from_cwdu).startswith(u'tmp'): + root_from_cwdu = os.path.dirname(root_from_cwdu) + if not isinstance(root_from_cwd, unicode) and root_from_cwd.decode(get_filesystem_encoding(), 'replace') != root_from_cwdu: msg += ("However, this may be a false alarm because the current directory path\n" "is not representable in the filesystem encoding. 
Please run the tests\n" @@ -146,7 +154,7 @@ self.failIfEqual(required_verstr, "unknown", "We don't know our version, because this distribution didn't come " - "with a _version.py and 'setup.py darcsver' hasn't been run.") + "with a _version.py and 'setup.py update_version' hasn't been run.") srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(srcfile)))) info = repr((res, allmydata.__appname__, required_verstr, srcdir)) @@ -194,12 +202,6 @@ def test_version_no_noise(self): self.skip_if_cannot_run_bintahoe() - from allmydata import get_package_versions, normalized_version - twisted_ver = get_package_versions()['Twisted'] - - if not normalized_version(twisted_ver) >= normalized_version('9.0.0'): - raise unittest.SkipTest("We pass this test only with Twisted >= v9.0.0") - d = self.run_bintahoe(["--version"]) def _cb(res): out, err, rc_or_sig = res @@ -282,7 +284,7 @@ # test the --node-directory form n3 = os.path.join(basedir, command + "-n3") - argv = ["--quiet", command, "--node-directory", n3] + argv = ["--quiet", "--node-directory", n3, command] rc, out, err = self.run_tahoe(argv) self.failUnlessEqual(err, "") self.failUnlessEqual(out, "") @@ -353,7 +355,7 @@ c1 = os.path.join(basedir, "c1") HOTLINE_FILE = os.path.join(c1, "suicide_prevention_hotline") TWISTD_PID_FILE = os.path.join(c1, "twistd.pid") - INTRODUCER_FURL_FILE = os.path.join(c1, "introducer.furl") + INTRODUCER_FURL_FILE = os.path.join(c1, "private", "introducer.furl") PORTNUM_FILE = os.path.join(c1, "introducer.port") NODE_URL_FILE = os.path.join(c1, "node.url") CONFIG_FILE = os.path.join(c1, "tahoe.cfg") @@ -404,8 +406,8 @@ d.addCallback(lambda res: self.poll(_node_has_started)) def _started(res): - # read the introducer.furl and introducer.port files so we can check that their - # contents don't change on restart + # read the introducer.furl and introducer.port files so we can + # check that their contents don't change on restart self.furl = fileutil.read(INTRODUCER_FURL_FILE) self.failUnless(os.path.exists(PORTNUM_FILE)) self.portnum = fileutil.read(PORTNUM_FILE) @@ -473,12 +475,6 @@ def test_client_no_noise(self): self.skip_if_cannot_daemonize() - from allmydata import get_package_versions, normalized_version - twisted_ver = get_package_versions()['Twisted'] - - if not normalized_version(twisted_ver) >= normalized_version('9.0.0'): - raise unittest.SkipTest("We pass this test only with Twisted >= v9.0.0") - basedir = self.workdir("test_client_no_noise") c1 = os.path.join(basedir, "c1") HOTLINE_FILE = os.path.join(c1, "suicide_prevention_hotline") @@ -588,6 +584,7 @@ d.addCallback(_cb2) def _node_has_started(): + # this depends upon both files being created atomically return os.path.exists(NODE_URL_FILE) and os.path.exists(PORTNUM_FILE) d.addCallback(lambda res: self.poll(_node_has_started)) @@ -627,7 +624,9 @@ # 'tahoe stop' command takes a while. 
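The atomicity that _node_has_started depends on above is the usual write-to-temporary-then-rename idiom; the project's real helper is fileutil.write_atomically, exercised in test_util.py further down. A minimal standalone sketch:

    import os

    def write_atomically(path, data, mode="b"):
        tmp = path + ".tmp"
        f = open(tmp, "w" + mode)
        try:
            f.write(data)
        finally:
            f.close()
        # rename() is atomic on POSIX: a concurrent reader sees either the
        # old file or the complete new one, never a partial write
        os.rename(tmp, path)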
def _stop(res): fileutil.write(HOTLINE_FILE, "") - self.failUnless(os.path.exists(TWISTD_PID_FILE), (TWISTD_PID_FILE, os.listdir(os.path.dirname(TWISTD_PID_FILE)))) + self.failUnless(os.path.exists(TWISTD_PID_FILE), + (TWISTD_PID_FILE, + os.listdir(os.path.dirname(TWISTD_PID_FILE)))) return self.run_bintahoe(["--quiet", "stop", c1]) d.addCallback(_stop) diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_sftp.py tahoe-lafs-1.10.0/src/allmydata/test/test_sftp.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_sftp.py 2012-05-14 02:07:31.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_sftp.py 2013-09-03 15:38:27.000000000 +0000 @@ -519,6 +519,46 @@ return d2 d.addCallback(_read_short) + # check that failed downloads cause failed reads. Note that this + # trashes the grid (by deleting all shares), so this must be at the + # end of the test function. + d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {})) + def _read_broken(rf): + d2 = defer.succeed(None) + d2.addCallback(lambda ign: self.g.nuke_from_orbit()) + d2.addCallback(lambda ign: + self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken", + rf.readChunk, 0, 100)) + # close shouldn't fail + d2.addCallback(lambda ign: rf.close()) + d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) + return d2 + d.addCallback(_read_broken) + + d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) + d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) + return d + + def test_openFile_read_error(self): + # The check at the end of openFile_read tested this for large files, + # but it trashed the grid in the process, so this needs to be a + # separate test. + small = upload.Data("0123456789"*10, None) + d = self._set_up("openFile_read_error") + d.addCallback(lambda ign: self.root.add_file(u"small", small)) + d.addCallback(lambda n: self.handler.openFile("/uri/"+n.get_uri(), sftp.FXF_READ, {})) + def _read_broken(rf): + d2 = defer.succeed(None) + d2.addCallback(lambda ign: self.g.nuke_from_orbit()) + d2.addCallback(lambda ign: + self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken", + rf.readChunk, 0, 100)) + # close shouldn't fail + d2.addCallback(lambda ign: rf.close()) + d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) + return d2 + d.addCallback(_read_broken) + d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d @@ -982,6 +1022,26 @@ self.shouldFail(NoSuchChildError, "rename new while open", "new", self.root.get, u"new")) + # check that failed downloads cause failed reads and failed close, + # when open for writing. Note that this trashes the grid (by deleting + # all shares), so this must be at the end of the test function. 
+ gross = u"gro\u00DF".encode("utf-8") + d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {})) + def _read_write_broken(rwf): + d2 = rwf.writeChunk(0, "abcdefghij") + d2.addCallback(lambda ign: self.g.nuke_from_orbit()) + + # reading should fail (reliably if we read past the written chunk) + d2.addCallback(lambda ign: + self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken", + rwf.readChunk, 0, 100)) + # close should fail in this case + d2.addCallback(lambda ign: + self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken close", + rwf.close)) + return d2 + d.addCallback(_read_write_broken) + d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_storage.py tahoe-lafs-1.10.0/src/allmydata/test/test_storage.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_storage.py 2012-06-22 15:23:15.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_storage.py 2013-09-03 15:38:27.000000000 +0000 @@ -1593,7 +1593,7 @@ def write_sdmf_share_to_server(self, storage_index, empty=False): - # Some tests need SDMF shares to verify that we can still + # Some tests need SDMF shares to verify that we can still # read them. This method writes one, which resembles but is not assert self.rref write = self.ss.remote_slot_testv_and_readv_and_writev @@ -1877,8 +1877,8 @@ def test_write_test_vectors(self): - # If we give the write proxy a bogus test vector at - # any point during the process, it should fail to write when we + # If we give the write proxy a bogus test vector at + # any point during the process, it should fail to write when we # tell it to write. def _check_failure(results): self.failUnlessEqual(len(results), 2) @@ -2153,7 +2153,7 @@ # 5: Write the root hash and salt hash # 6: Write the signature and verification key # 7: Write the file. - # + # # Some of these can be performed out-of-order, and some can't. # The dependencies that I want to test here are: # - Private key before block hashes @@ -2678,7 +2678,7 @@ def test_sdmf_writer(self): # Go through the motions of writing an SDMF share to the storage # server. Then read the storage server to see that the share got - # written in the way that we think it should have. + # written in the way that we think it should have. # We do this first so that the necessary instance variables get # set the way we want them for the tests below. 
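For reference, the conditional-write API that the storage tests above drive through write = self.ss.remote_slot_testv_and_readv_and_writev pairs every write with a test vector, and applies the writes only if all tests pass. A sketch with made-up secrets, offsets, and data (argument shapes follow the RIStorageServer interface; ss is a storage server instance as in the tests above):

    write_enabler = renew_secret = cancel_secret = "\x00" * 32  # placeholders
    secrets = (write_enabler, renew_secret, cancel_secret)
    testv = [(0, 8, "eq", "expected")]   # pass only if bytes 0..7 == "expected"
    datav = [(0, "newdata!")]            # then write these bytes at offset 0
    tw_vectors = {0: (testv, datav, None)}  # share 0; None leaves length alone
    readv = [(0, 8)]
    (wrote, read_data) = ss.remote_slot_testv_and_readv_and_writev(
        "si1", secrets, tw_vectors, readv)
    # wrote is False, and nothing is modified, if any test vector failed;
    # read_data maps shnum to the bytes read before the write was applied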
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_system.py tahoe-lafs-1.10.0/src/allmydata/test/test_system.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_system.py 2012-06-16 18:21:04.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_system.py 2013-09-03 15:38:27.000000000 +0000 @@ -1,6 +1,7 @@ -from base64 import b32encode -import os, sys, time, simplejson + +import os, re, sys, time, simplejson from cStringIO import StringIO + from twisted.trial import unittest from twisted.internet import defer from twisted.internet import threads # CLI tests use deferToThread @@ -125,7 +126,7 @@ return d1 d.addCallback(_do_upload) def _upload_done(results): - theuri = results.uri + theuri = results.get_uri() log.msg("upload finished: uri is %s" % (theuri,)) self.uri = theuri assert isinstance(self.uri, str), self.uri @@ -227,7 +228,7 @@ u = upload.Data(HELPER_DATA, convergence=convergence) d = self.extra_node.upload(u) def _uploaded(results): - n = self.clients[1].create_node_from_uri(results.uri) + n = self.clients[1].create_node_from_uri(results.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(newdata): @@ -241,7 +242,7 @@ u.debug_stash_RemoteEncryptedUploadable = True d = self.extra_node.upload(u) def _uploaded(results): - n = self.clients[1].create_node_from_uri(results.uri) + n = self.clients[1].create_node_from_uri(results.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(newdata): @@ -326,13 +327,13 @@ d.addCallback(lambda res: self.extra_node.upload(u2)) def _uploaded(results): - cap = results.uri + cap = results.get_uri() log.msg("Second upload complete", level=log.NOISY, facility="tahoe.test.test_system") # this is really bytes received rather than sent, but it's # convenient and basically measures the same thing - bytes_sent = results.ciphertext_fetched + bytes_sent = results.get_ciphertext_fetched() self.failUnless(isinstance(bytes_sent, (int, long)), bytes_sent) # We currently don't support resumption of upload if the data is @@ -789,7 +790,7 @@ newappverstr = "%s: %s" % (allmydata.__appname__, altverstr) self.failUnless((appverstr in res) or (newappverstr in res), (appverstr, newappverstr, res)) - self.failUnless("Announcement Summary: storage: 5, stub_client: 5" in res) + self.failUnless("Announcement Summary: storage: 5" in res) self.failUnless("Subscription Summary: storage: 5" in res) self.failUnless("tahoe.css" in res) except unittest.FailTest: @@ -810,9 +811,9 @@ self.failUnlessEqual(data["subscription_summary"], {"storage": 5}) self.failUnlessEqual(data["announcement_summary"], - {"storage": 5, "stub_client": 5}) + {"storage": 5}) self.failUnlessEqual(data["announcement_distinct_hosts"], - {"storage": 1, "stub_client": 1}) + {"storage": 1}) except unittest.FailTest: print print "GET %s?t=json output was:" % self.introweb_url @@ -1096,16 +1097,15 @@ public = "uri/" + self._root_directory_uri d = getPage(base) def _got_welcome(page): - # XXX This test is oversensitive to formatting - expected = "Connected to %d\n of %d known storage servers:" % (self.numclients, self.numclients) - self.failUnless(expected in page, - "I didn't see the right 'connected storage servers'" - " message in: %s" % page - ) - expected = "My nodeid: %s" % (b32encode(self.clients[0].nodeid).lower(),) - self.failUnless(expected in page, - "I didn't see the right 'My nodeid' message " - "in: %s" % page) + html = page.replace('\n', ' ') + connected_re = r'Connected to %d\s*of %d known storage servers' % (self.numclients, self.numclients) + 
self.failUnless(re.search(connected_re, html),
+                            "I didn't see the right '%s' message in:\n%s" % (connected_re, page))
+            # nodeids/tubids don't have any regexp-special characters
+            nodeid_re = r'<th>Node ID:</th>\s*<td title="TubID: %s">%s</td>' % (
+                self.clients[0].get_long_tubid(), self.clients[0].get_long_nodeid())
+            self.failUnless(re.search(nodeid_re, html),
+                            "I didn't see the right '%s' message in:\n%s" % (nodeid_re, page))
             self.failUnless("Helper: 0 active uploads" in page)
         d.addCallback(_got_welcome)
         d.addCallback(self.log, "done with _got_welcome")
@@ -1113,9 +1113,9 @@
         # get the welcome page from the node that uses the helper too
         d.addCallback(lambda res: getPage(self.helper_webish_url))
         def _got_welcome_helper(page):
-            self.failUnless("Connected to helper?: yes" in page,
-                            page)
-            self.failUnless("Not running helper" in page)
+            html = page.replace('\n', ' ')
+            self.failUnless(re.search(r'<div class="status-indicator connected-yes"></div>\s*<div>Helper</div>
', html), page) + self.failUnlessIn("Not running helper", page) d.addCallback(_got_welcome_helper) d.addCallback(lambda res: getPage(base + public)) @@ -1273,7 +1273,7 @@ # itself) doesn't explode when you ask for its status d.addCallback(lambda res: getPage(self.helper_webish_url + "status/")) def _got_non_helper_status(res): - self.failUnless("Upload and Download Status" in res) + self.failUnlessIn("Recent and Active Operations", res) d.addCallback(_got_non_helper_status) # or for helper status with t=json @@ -1287,8 +1287,8 @@ # see if the statistics page exists d.addCallback(lambda res: self.GET("statistics")) def _got_stats(res): - self.failUnless("Node Statistics" in res) - self.failUnless(" 'downloader.files_downloaded': 5," in res, res) + self.failUnlessIn("Operational Statistics", res) + self.failUnlessIn(" 'downloader.files_downloaded': 5,", res) d.addCallback(_got_stats) d.addCallback(lambda res: self.GET("statistics?t=json")) def _got_stats_json(res): @@ -1441,7 +1441,7 @@ def run(ignored, verb, *args, **kwargs): stdin = kwargs.get("stdin", "") - newargs = [verb] + nodeargs + list(args) + newargs = nodeargs + [verb] + list(args) return self._run_cli(newargs, stdin=stdin) def _check_ls((out,err), expected_children, unexpected_children=[]): @@ -1746,7 +1746,7 @@ # tahoe_ls doesn't currently handle the error correctly: it tries to # JSON-parse a traceback. ## def _ls_missing(res): -## argv = ["ls"] + nodeargs + ["bogus"] +## argv = nodeargs + ["ls", "bogus"] ## return self._run_cli(argv) ## d.addCallback(_ls_missing) ## def _check_ls_missing((out,err)): @@ -1770,7 +1770,7 @@ def _run_in_subprocess(ignored, verb, *args, **kwargs): stdin = kwargs.get("stdin") env = kwargs.get("env") - newargs = [verb, "--node-directory", self.getdir("client0")] + list(args) + newargs = ["--node-directory", self.getdir("client0"), verb] + list(args) return self.run_bintahoe(newargs, stdin=stdin, env=env) def _check_succeeded(res, check_stderr=True): diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_upload.py tahoe-lafs-1.10.0/src/allmydata/test/test_upload.py --- tahoe-lafs-1.9.2/src/allmydata/test/test_upload.py 2012-06-21 19:42:46.000000000 +0000 +++ tahoe-lafs-1.10.0/src/allmydata/test/test_upload.py 2013-09-03 15:38:27.000000000 +0000 @@ -11,7 +11,7 @@ from allmydata import uri, monitor, client from allmydata.immutable import upload, encode from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError -from allmydata.util import log +from allmydata.util import log, base32 from allmydata.util.assertutil import precondition from allmydata.util.deferredutil import DeferredListShouldSucceed from allmydata.test.no_network import GridTestMixin @@ -20,11 +20,12 @@ shares_by_server, merge_servers from allmydata.storage_client import StorageFarmBroker from allmydata.storage.server import storage_index_to_dir +from allmydata.client import Client MiB = 1024*1024 def extract_uri(results): - return results.uri + return results.get_uri() # Some of these took longer than 480 seconds on Zandr's arm box, but this may # have been due to an earlier test ERROR'ing out due to timeout, which seems @@ -105,7 +106,7 @@ self.allocated = [] self.queries = 0 self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" : - { "maximum-immutable-share-size": 2**32 }, + { "maximum-immutable-share-size": 2**32 - 1 }, "application-version": str(allmydata.__full_version__), } if mode == "small": @@ -197,7 +198,9 @@ for fakeid in range(self.num_servers) ] self.storage_broker = StorageFarmBroker(None, 
permute_peers=True) for (serverid, rref) in servers: - self.storage_broker.test_add_rref(serverid, rref) + ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(serverid), + "permutation-seed-base32": base32.b2a(serverid) } + self.storage_broker.test_add_rref(serverid, rref, ann) self.last_servers = [s[1] for s in servers] def log(self, *args, **kwargs): @@ -633,42 +636,52 @@ class StorageIndex(unittest.TestCase): def test_params_must_matter(self): DATA = "I am some data" + PARAMS = Client.DEFAULT_ENCODING_PARAMETERS + u = upload.Data(DATA, convergence="") + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1 = eu.get_storage_index() # CHK means the same data should encrypt the same way u = upload.Data(DATA, convergence="") + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1a = eu.get_storage_index() # but if we use a different convergence string it should be different u = upload.Data(DATA, convergence="wheee!") + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt1 = eu.get_storage_index() # and if we add yet a different convergence it should be different again u = upload.Data(DATA, convergence="NOT wheee!") + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt2 = eu.get_storage_index() # and if we use the first string again it should be the same as last time u = upload.Data(DATA, convergence="wheee!") + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt1a = eu.get_storage_index() # and if we change the encoding parameters, it should be different (from the same convergence string with different encoding parameters) u = upload.Data(DATA, convergence="") + u.set_default_encoding_parameters(PARAMS) u.encoding_param_k = u.default_encoding_param_k + 1 eu = upload.EncryptAnUploadable(u) d2 = eu.get_storage_index() # and if we use a random key, it should be different than the CHK u = upload.Data(DATA, convergence=None) + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d3 = eu.get_storage_index() # and different from another instance u = upload.Data(DATA, convergence=None) + u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d4 = eu.get_storage_index() @@ -765,9 +778,7 @@ broker = self.g.clients[0].storage_broker sh = self.g.clients[0]._secret_holder data = upload.Data("data" * 10000, convergence="") - data.encoding_param_k = 3 - data.encoding_param_happy = 4 - data.encoding_param_n = 10 + data.set_default_encoding_parameters({'k': 3, 'happy': 4, 'n': 10}) uploadable = upload.EncryptAnUploadable(data) encoder = encode.Encoder() encoder.set_encrypted_uploadable(uploadable) @@ -861,7 +872,7 @@ self.data = data d = client.upload(data) def _store_uri(ur): - self.uri = ur.uri + self.uri = ur.get_uri() d.addCallback(_store_uri) d.addCallback(lambda ign: self.find_uri_shares(self.uri)) @@ -879,13 +890,12 @@ DATA = "data" * 100 u = upload.Data(DATA, convergence="") d = c0.upload(u) - d.addCallback(lambda ur: c0.create_node_from_uri(ur.uri)) + d.addCallback(lambda ur: c0.create_node_from_uri(ur.get_uri())) m = monitor.Monitor() d.addCallback(lambda fn: fn.check(m)) def _check(cr): - data = cr.get_data() - self.failUnlessEqual(data["count-shares-needed"], 7) - self.failUnlessEqual(data["count-shares-expected"], 12) + self.failUnlessEqual(cr.get_encoding_needed(), 7) + self.failUnlessEqual(cr.get_encoding_expected(), 12) d.addCallback(_check) return d @@ -1159,7 +1169,7 @@ # Make sure 
@@ -1159,7 +1169,7 @@
         # Make sure that only as many shares as necessary to satisfy
         # servers of happiness were pushed.
         d.addCallback(lambda results:
-            self.failUnlessEqual(results.pushed_shares, 3))
+            self.failUnlessEqual(results.get_pushed_shares(), 3))
         d.addCallback(lambda ign:
             self.failUnless(self._has_happy_share_distribution()))
         return d
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_util.py tahoe-lafs-1.10.0/src/allmydata/test/test_util.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_util.py	2012-05-17 00:16:42.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_util.py	2013-09-03 15:38:27.000000000 +0000
@@ -420,6 +420,15 @@
         fileutil.rm_dir(basedir)
         fileutil.remove_if_possible(fn) # should survive errors
 
+    def test_write_atomically(self):
+        basedir = "util/FileUtil/test_write_atomically"
+        fileutil.make_dirs(basedir)
+        fn = os.path.join(basedir, "here")
+        fileutil.write_atomically(fn, "one")
+        self.failUnlessEqual(fileutil.read(fn), "one")
+        fileutil.write_atomically(fn, "two", mode="") # non-binary
+        self.failUnlessEqual(fileutil.read(fn), "two")
+
     def test_open_or_create(self):
         basedir = "util/FileUtil/test_open_or_create"
         fileutil.make_dirs(basedir)
@@ -428,7 +437,7 @@
         f.write("stuff.")
         f.close()
         f = fileutil.open_or_create(fn)
-        f.seek(0, 2)
+        f.seek(0, os.SEEK_END)
         f.write("more.")
         f.close()
         f = open(fn, "r")
@@ -724,7 +733,8 @@
             (1000*1000*1000, "1.00 GB"),
             (1000*1000*1000*1000, "1.00 TB"),
             (1000*1000*1000*1000*1000, "1.00 PB"),
-            (1234567890123456, "1.23 PB"),
+            (1000*1000*1000*1000*1000*1000, "1.00 EB"),
+            (1234567890123456789, "1.23 EB"),
             ]
         for (x, expected) in tests_si:
             got = abbreviate.abbreviate_space(x, SI=True)
@@ -744,7 +754,8 @@
             (1024*1024*1024*1024, "1.00 TiB"),
             (1000*1000*1000*1000*1000, "909.49 TiB"),
             (1024*1024*1024*1024*1024, "1.00 PiB"),
-            (1234567890123456, "1.10 PiB"),
+            (1024*1024*1024*1024*1024*1024, "1.00 EiB"),
+            (1234567890123456789, "1.07 EiB"),
             ]
         for (x, expected) in tests_base1024:
             got = abbreviate.abbreviate_space(x, SI=False)
@@ -766,8 +777,19 @@
         self.failUnlessEqual(p("10MiB"), 10*1024*1024)
         self.failUnlessEqual(p("5G"), 5*1000*1000*1000)
         self.failUnlessEqual(p("4GiB"), 4*1024*1024*1024)
+        self.failUnlessEqual(p("3TB"), 3*1000*1000*1000*1000)
+        self.failUnlessEqual(p("3TiB"), 3*1024*1024*1024*1024)
+        self.failUnlessEqual(p("6PB"), 6*1000*1000*1000*1000*1000)
+        self.failUnlessEqual(p("6PiB"), 6*1024*1024*1024*1024*1024)
+        self.failUnlessEqual(p("9EB"), 9*1000*1000*1000*1000*1000*1000)
+        self.failUnlessEqual(p("9EiB"), 9*1024*1024*1024*1024*1024*1024)
+
         e = self.failUnlessRaises(ValueError, p, "12 cubits")
-        self.failUnless("12 cubits" in str(e))
+        self.failUnlessIn("12 cubits", str(e))
+        e = self.failUnlessRaises(ValueError, p, "1 BB")
+        self.failUnlessIn("1 BB", str(e))
+        e = self.failUnlessRaises(ValueError, p, "fhtagn")
+        self.failUnlessIn("fhtagn", str(e))
 
 class Limiter(unittest.TestCase):
     timeout = 480 # This takes longer than 240 seconds on Francois's arm box.
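The new exabyte vectors above exercise ``allmydata.util.abbreviate`` at the top of its extended range. Restated as a usage sketch (assuming ``p`` in the test is ``abbreviate.parse_abbreviated_size``; expected values are taken directly from the test vectors):

    from allmydata.util import abbreviate

    abbreviate.abbreviate_space(1234567890123456789, SI=True)   # "1.23 EB"
    abbreviate.abbreviate_space(1234567890123456789, SI=False)  # "1.07 EiB"
    abbreviate.parse_abbreviated_size("9EB")   # 9 * 1000**6
    abbreviate.parse_abbreviated_size("9EiB")  # 9 * 1024**6
    abbreviate.parse_abbreviated_size("1 BB")  # raises ValueError
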
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_version.py tahoe-lafs-1.10.0/src/allmydata/test/test_version.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_version.py	2012-05-14 02:07:31.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_version.py	2013-09-03 15:38:27.000000000 +0000
@@ -58,7 +58,7 @@
         res = cross_check({}, [])
         self.failUnlessEqual(res, [])
 
-        res = cross_check({}, [("sqlite3", ("1.0", "", "blah"))])
+        res = cross_check({}, [("allmydata-tahoe", ("1.0", "", "blah"))])
         self.failUnlessEqual(res, [])
 
         res = cross_check({"foo": ("unparseable", "")}, [])
diff -Nru tahoe-lafs-1.9.2/src/allmydata/test/test_web.py tahoe-lafs-1.10.0/src/allmydata/test/test_web.py
--- tahoe-lafs-1.9.2/src/allmydata/test/test_web.py	2012-05-23 05:27:26.000000000 +0000
+++ tahoe-lafs-1.10.0/src/allmydata/test/test_web.py	2013-09-03 15:38:27.000000000 +0000
@@ -1,4 +1,4 @@
-import os.path, re, urllib, time
+import os.path, re, urllib, time, cgi
 import simplejson
 from StringIO import StringIO
 
@@ -11,11 +11,12 @@
 
 from foolscap.api import fireEventually, flushEventualQueue
+from nevow.util import escapeToXML
 from nevow import rend
 from allmydata import interfaces, uri, webish, dirnode
 from allmydata.storage.shares import get_share_file
-from allmydata.storage_client import StorageFarmBroker
+from allmydata.storage_client import StorageFarmBroker, StubServer
 from allmydata.immutable import upload
 from allmydata.immutable.downloader.status import DownloadStatus
 from allmydata.dirnode import DirectoryNode
@@ -79,33 +80,40 @@
 
 class FakeUploader(service.Service):
     name = "uploader"
+    helper_furl = None
+    helper_connected = False
+
     def upload(self, uploadable):
         d = uploadable.get_size()
         d.addCallback(lambda size: uploadable.read(size))
         def _got_data(datav):
             data = "".join(datav)
             n = create_chk_filenode(data, self.all_contents)
-            results = upload.UploadResults()
-            results.uri = n.get_uri()
-            return results
+            ur = upload.UploadResults(file_size=len(data),
+                                      ciphertext_fetched=0,
+                                      preexisting_shares=0,
+                                      pushed_shares=10,
+                                      sharemap={},
+                                      servermap={},
+                                      timings={},
+                                      uri_extension_data={},
+                                      uri_extension_hash="fake",
+                                      verifycapstr="fakevcap")
+            ur.set_uri(n.get_uri())
+            return ur
         d.addCallback(_got_data)
         return d
+
     def get_helper_info(self):
-        return (None, False)
+        return (self.helper_furl, self.helper_connected)
 
-class FakeIServer:
-    def __init__(self, binaryserverid):
-        self.binaryserverid = binaryserverid
-    def get_name(self): return "short"
-    def get_longname(self): return "long"
-    def get_serverid(self): return self.binaryserverid
 
 def build_one_ds():
     ds = DownloadStatus("storage_index", 1234)
     now = time.time()
 
-    serverA = FakeIServer(hashutil.tagged_hash("foo", "serverid_a")[:20])
-    serverB = FakeIServer(hashutil.tagged_hash("foo", "serverid_b")[:20])
+    serverA = StubServer(hashutil.tagged_hash("foo", "serverid_a")[:20])
+    serverB = StubServer(hashutil.tagged_hash("foo", "serverid_b")[:20])
     storage_index = hashutil.storage_index_hash("SI")
     e0 = ds.add_segment_request(0, now)
     e0.activate(now+0.5)
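As the ``FakeUploader`` above shows, ``upload.UploadResults`` in 1.10.0 takes its statistics as constructor arguments and is read through accessors, where 1.9.2 code set and read bare attributes (``.uri``, ``.pushed_shares``). A sketch of the new shape, using only fields visible in this hunk; the payload and URI are illustrative:

    from allmydata.immutable import upload

    data = "example bytes"  # illustrative payload
    ur = upload.UploadResults(file_size=len(data), ciphertext_fetched=0,
                              preexisting_shares=0, pushed_shares=10,
                              sharemap={}, servermap={}, timings={},
                              uri_extension_data={}, uri_extension_hash="fake",
                              verifycapstr="fakevcap")
    ur.set_uri("URI:...")     # the fake above passes n.get_uri() here
    ur.get_uri()              # replaces reading .uri
    ur.get_pushed_shares()    # replaces reading .pushed_shares
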
@@ -163,6 +171,59 @@
     def list_all_helper_statuses(self):
         return []
 
+class FakeDisplayableServer(StubServer):
+    def __init__(self, serverid, nickname):
+        StubServer.__init__(self, serverid)
+        self.announcement = {"my-version": "allmydata-tahoe-fake",
+                             "service-name": "storage",
+                             "nickname": nickname}
+    def is_connected(self):
+        return True
+    def get_permutation_seed(self):
+        return ""
+    def get_remote_host(self):
+        return ""
+    def get_last_loss_time(self):
+        return None
+    def get_announcement_time(self):
+        return None
+    def get_announcement(self):
+        return self.announcement
+    def get_nickname(self):
+        return self.announcement["nickname"]
+
+class FakeBucketCounter(object):
+    def get_state(self):
+        return {"last-complete-bucket-count": 0}
+    def get_progress(self):
+        return {"estimated-time-per-cycle": 0,
+                "cycle-in-progress": False,
+                "remaining-wait-time": 0}
+
+class FakeLeaseChecker(object):
+    def __init__(self):
+        self.expiration_enabled = False
+        self.mode = "age"
+        self.override_lease_duration = None
+        self.sharetypes_to_expire = {}
+    def get_state(self):
+        return {"history": None}
+    def get_progress(self):
+        return {"estimated-time-per-cycle": 0,
+                "cycle-in-progress": False,
+                "remaining-wait-time": 0}
+
+class FakeStorageServer(service.MultiService):
+    name = 'storage'
+    def __init__(self, nodeid, nickname):
+        service.MultiService.__init__(self)
+        self.my_nodeid = nodeid
+        self.nickname = nickname
+        self.bucket_counter = FakeBucketCounter()
+        self.lease_checker = FakeLeaseChecker()
+    def get_stats(self):
+        return {"storage_server.accepting_immutable_shares": False}
+
 class FakeClient(Client):
     def __init__(self):
         # don't upcall to Client.__init__, since we only want to initialize a
@@ -170,13 +231,16 @@
         service.MultiService.__init__(self)
         self.all_contents = {}
         self.nodeid = "fake_nodeid"
-        self.nickname = "fake_nickname"
+        self.nickname = u"fake_nickname \u263A"
         self.introducer_furl = "None"
         self.stats_provider = FakeStatsProvider()
         self._secret_holder = SecretHolder("lease secret", "convergence secret")
         self.helper = None
         self.convergence = "some random string"
         self.storage_broker = StorageFarmBroker(None, permute_peers=True)
+        # fake knowledge of another server
+        self.storage_broker.test_add_server("other_nodeid",
+            FakeDisplayableServer("other_nodeid", u"other_nickname \u263B"))
         self.introducer_client = None
         self.history = FakeHistory()
         self.uploader = FakeUploader()
@@ -188,6 +252,12 @@
                                    None, None, None)
         self.nodemaker.all_contents = self.all_contents
         self.mutable_file_default = SDMF_VERSION
+        self.addService(FakeStorageServer(self.nodeid, self.nickname))
+
+    def get_long_nodeid(self):
+        return "v0-nodeid"
+    def get_long_tubid(self):
+        return "tubid"
 
     def startService(self):
         return service.MultiService.startService(self)
@@ -249,16 +319,27 @@
         self._sub_uri = sub_uri
         foo.set_uri(u"sub", sub_uri, sub_uri)
         sub = self.s.create_node_from_uri(sub_uri)
+        self._sub_node = sub
 
         _ign, n, blocking_uri = self.makefile(1)
         foo.set_uri(u"blockingfile", blocking_uri, blocking_uri)
 
+        # filenode to test for html encoding issues
+        self._htmlname_unicode = u"<&weirdly'named\"file>>>_