diff -Nru waitress-1.1.0/appveyor.yml waitress-1.2.0~b2/appveyor.yml --- waitress-1.1.0/appveyor.yml 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/appveyor.yml 2019-01-02 20:46:09.000000000 +0000 @@ -1,13 +1,17 @@ environment: matrix: - - PYTHON: "C:\\Python35" - TOXENV: "py35" - PYTHON: "C:\\Python27" TOXENV: "py27" - PYTHON: "C:\\Python27-x64" TOXENV: "py27" - - PYTHON: "C:\\Python35-x64" - TOXENV: "py35" + - PYTHON: "C:\\Python36" + TOXENV: "py36" + - PYTHON: "C:\\Python36-x64" + TOXENV: "py36" + - PYTHON: "C:\\Python37" + TOXENV: "py37" + - PYTHON: "C:\\Python37-x64" + TOXENV: "py37" cache: - '%LOCALAPPDATA%\pip\Cache' diff -Nru waitress-1.1.0/CHANGES.txt waitress-1.2.0~b2/CHANGES.txt --- waitress-1.1.0/CHANGES.txt 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/CHANGES.txt 2019-01-02 20:46:09.000000000 +0000 @@ -1,94 +1,87 @@ -1.1.0 (2017-10-10) ------------------- - -Features -~~~~~~~~ - -- Waitress now has a __main__ and thus may be called with ``python -mwaitress`` +1.2.0b2 (2019-02-02) +-------------------- Bugfixes ~~~~~~~~ -- Waitress no longer allows lowercase HTTP verbs. This change was made to fall - in line with most HTTP servers. See https://github.com/Pylons/waitress/pull/170 +- Fixed logic to no longer warn on writes where the output is required to have + a body but there may not be any data to be written. Solves issue posted on + the Pylons Project mailling list with 1.2.0b1. + -- When receiving non-ascii bytes in the request URL, waitress will no longer - abruptly close the connection, instead returning a 400 Bad Request. See - https://github.com/Pylons/waitress/pull/162 and - https://github.com/Pylons/waitress/issues/64 +1.2.0b1 (2018-12-31) +-------------------- -1.0.2 (2017-02-04) ------------------- +Happy New Year! Features ~~~~~~~~ -- Python 3.6 is now officially supported in Waitress - -Bugfixes -~~~~~~~~ - -- Add a work-around for libc issue on Linux not following the documented - standards. If getnameinfo() fails because of DNS not being available it - should return the IP address instead of the reverse DNS entry, however - instead getnameinfo() raises. We catch this, and ask getnameinfo() - for the same information again, explicitly asking for IP address instead of - reverse DNS hostname. See https://github.com/Pylons/waitress/issues/149 and - https://github.com/Pylons/waitress/pull/153 - -1.0.1 (2016-10-22) ------------------- - -Bugfixes -~~~~~~~~ - -- IPv6 support on Windows was broken due to missing constants in the socket - module. This has been resolved by setting the constants on Windows if they - are missing. See https://github.com/Pylons/waitress/issues/138 - -- A ValueError was raised on Windows when passing a string for the port, on - Windows in Python 2 using service names instead of port numbers doesn't work - with `getaddrinfo`. This has been resolved by attempting to convert the port - number to an integer, if that fails a ValueError will be raised. See - https://github.com/Pylons/waitress/issues/139 +- Setting the ``trusted_proxy`` setting to ``'*'`` (wildcard) will allow all + upstreams to be considered trusted proxies, thereby allowing services behind + Cloudflare/ELBs to function correctly whereby there may not be a singular IP + address that requests are received from. + + Using this setting is potentially dangerous if your server is also available + from anywhere on the internet, and further protections should be used to lock + down access to Waitress. 
See https://github.com/Pylons/waitress/pull/224 + +- Waitress has increased its support of the X-Forwarded-* headers and includes + Forwarded (RFC7239) support. This may be used to allow proxy servers to + influence the WSGI environment. See + https://github.com/Pylons/waitress/pull/209 + + This also provides a new security feature when using Waitress behind a proxy + in that it is possible to remove untrusted proxy headers thereby making sure + that downstream WSGI applications don't accidentally use those proxy headers + to make security decisions. + + The documentation has more information, see the following new arguments: + + - trusted_proxy_count + - trusted_proxy_headers + - clear_untrusted_proxy_headers + - log_untrusted_proxy_headers (useful for debugging) + + Be aware that the defaults for these are currently backwards compatible with + older versions of Waitress, this will change in a future release of waitress. + If you expect to need this behaviour please explicitly set these variables in + your configuration, or pin this version of waitress. + + Documentation: + https://docs.pylonsproject.org/projects/waitress/en/latest/reverse-proxy.html + +- Waitress can now accept a list of sockets that are already pre-bound rather + than creating its own to allow for socket activation. Support for init + systems/other systems that create said activated sockets is not included. See + https://github.com/Pylons/waitress/pull/215 - -1.0.0 (2016-08-31) ------------------- +- Server header can be omitted by specifying ``ident=None`` or ``ident=''``. + See https://github.com/Pylons/waitress/pull/187 Bugfixes ~~~~~~~~ -- Removed `AI_ADDRCONFIG` from the call to `getaddrinfo`, this resolves an - issue whereby `getaddrinfo` wouldn't return any addresses to `bind` to on - hosts where there is no internet connection but localhost is requested to be - bound to. See https://github.com/Pylons/waitress/issues/131 for more - information. - -Deprecations -~~~~~~~~~~~~ - -- Python 2.6 is no longer supported. - -Features -~~~~~~~~ - -- IPv6 support - -- Waitress is now able to listen on multiple sockets, including IPv4 and IPv6. - Instead of passing in a host/port combination you now provide waitress with a - space delineated list, and it will create as many sockets as required. - - .. code-block:: python - - from waitress import serve - serve(wsgiapp, listen='0.0.0.0:8080 [::]:9090 *:6543') - -Security -~~~~~~~~ - -- Waitress will now drop HTTP headers that contain an underscore in the key - when received from a client. This is to stop any possible underscore/dash - conflation that may lead to security issues. See - https://github.com/Pylons/waitress/pull/80 and - https://www.djangoproject.com/weblog/2015/jan/13/security/ +- Waitress will no longer send Transfer-Encoding or Content-Length for 1xx, + 204, or 304 responses, and will completely ignore any message body sent by + the WSGI application, making sure to follow the HTTP standard. See + https://github.com/Pylons/waitress/pull/166, + https://github.com/Pylons/waitress/issues/165, + https://github.com/Pylons/waitress/issues/152, and + https://github.com/Pylons/waitress/pull/202 + +Compatibility +~~~~~~~~~~~~~ + +- Waitress has now "vendored" asyncore into itself as ``waitress.wasyncore``. + This is to cope with the eventuality that asyncore will be removed from + the Python standard library in 3.8 or so. + +Documentation +~~~~~~~~~~~~~ + +- Bring in documentation of paste.translogger from Pyramid. Reorganize and + clean up documentation. 
See + https://github.com/Pylons/waitress/pull/205 + https://github.com/Pylons/waitress/pull/70 + https://github.com/Pylons/waitress/pull/206 diff -Nru waitress-1.1.0/contributing.md waitress-1.2.0~b2/contributing.md --- waitress-1.1.0/contributing.md 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/contributing.md 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,95 @@ +Contributing +============ + +All projects under the Pylons Projects, including this one, follow the guidelines established at [How to Contribute](https://pylonsproject.org/community-how-to-contribute.html) and [Coding Style and Standards](https://pylonsproject.org/community-coding-style-standards.html). + + +Get support +----------- + +See [Get Support](https://pylonsproject.org/community-support.html). You are reading this document most likely because you want to *contribute* to the project and not *get support*. + + +Working on issues +----------------- + +To respect both your time and ours, we emphasize the following points. + +* We use the [Issue Tracker on GitHub](https://github.com/Pylons/waitress/issues) to discuss bugs, improvements, and feature requests. Search through existing issues before reporting a new one. Issues may be complex or wide-ranging. A discussion up front sets us all on the best path forward. +* Minor issues—such as spelling, grammar, and syntax—don't require discussion and a pull request is sufficient. +* After discussing the issue with maintainers and agreeing on a resolution, submit a pull request of your work. [GitHub Flow](https://guides.github.com/introduction/flow/index.html) describes the workflow process and why it's a good practice. + + +Git branches +------------ + +There is a single branch [master](https://github.com/Pylons/waitress/) on which development takes place and from which releases to PyPI are tagged. This is the default branch on GitHub. + + +Running tests and building documentation +---------------------------------------- + +We use [tox](https://tox.readthedocs.io/en/latest/) to automate test running, coverage, and building documentation across all supported Python versions. + +To run everything configured in the `tox.ini` file: + + $ tox + +To run tests on Python 2 and 3, and ensure full coverage, but exclude building of docs: + + $ tox -e py2-cover,py3-cover,coverage + +To build the docs only: + + $ tox -e docs + +See the `tox.ini` file for details. + + +Contributing documentation +-------------------------- + +*Note:* These instructions might not work for Windows users. Suggestions to improve the process for Windows users are welcome by submitting an issue or a pull request. + +1. Fork the repo on GitHub by clicking the [Fork] button. +2. Clone your fork into a workspace on your local machine. + + cd ~/projects + git clone git@github.com:/waitress.git + +3. Add a git remote "upstream" for the cloned fork. + + git remote add upstream git@github.com:Pylons/waitress.git + +4. Set an environment variable to your virtual environment. + + # Mac and Linux + $ export VENV=~/projects/waitress/env + + # Windows + set VENV=c:\projects\waitress\env + +5. Try to build the docs in your workspace. + + # Mac and Linux + $ make clean html SPHINXBUILD=$VENV/bin/sphinx-build + + # Windows + c:\> make clean html SPHINXBUILD=%VENV%\bin\sphinx-build + + If successful, then you can make changes to the documentation. You can load the built documentation in the `/_build/html/` directory in a web browser. + +6. 
From this point forward, follow the typical [git workflow](https://help.github.com/articles/what-is-a-good-git-workflow/). Start by pulling from the upstream to get the most current changes. + + git pull upstream master + +7. Make a branch, make changes to the docs, and rebuild them as indicated in step 5. To speed up the build process, you can omit `clean` from the above command to rebuild only those pages that depend on the files you have changed. + +8. Once you are satisfied with your changes and the documentation builds successfully without errors or warnings, then git commit and push them to your "origin" repository on GitHub. + + git commit -m "commit message" + git push -u origin --all # first time only, subsequent can be just 'git push'. + +9. Create a [pull request](https://help.github.com/articles/using-pull-requests/). + +10. Repeat the process starting from Step 6. diff -Nru waitress-1.1.0/CONTRIBUTORS.txt waitress-1.2.0~b2/CONTRIBUTORS.txt --- waitress-1.1.0/CONTRIBUTORS.txt 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/CONTRIBUTORS.txt 2019-01-02 20:46:09.000000000 +0000 @@ -88,7 +88,7 @@ Licensing Exceptions ==================== -Code committed within the ``docs/`` subdirectory of the Pyramid source +Code committed within the ``docs/`` subdirectory of the Waitress source control repository and "docstrings" which appear in the documentation generated by running "make" within this directory is licensed under the Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States @@ -98,7 +98,7 @@ ==================== The below-signed are contributors to a code repository that is part of the -project named "Pyramid". Each below-signed contributor has read, understand +project named "Waitress". Each below-signed contributor has read, understand and agrees to the terms above in the section within this document entitled "Pylons Project Contributor Agreement" as of the date beside his or her name. @@ -140,3 +140,7 @@ - Atsushi Odagiri, 2017-02-12 - David D Lowe, 2017-06-02 + +- Jack Wearden, 2018-05-18 + +- Frank Krick, 2018-10-29 diff -Nru waitress-1.1.0/debian/changelog waitress-1.2.0~b2/debian/changelog --- waitress-1.1.0/debian/changelog 2018-05-13 08:12:31.000000000 +0000 +++ waitress-1.2.0~b2/debian/changelog 2019-01-08 14:54:08.000000000 +0000 @@ -1,3 +1,20 @@ +waitress (1.2.0~b2-2) unstable; urgency=medium + + * Unbreak docco build (Closes: #918669). + + -- Andrej Shadura Tue, 08 Jan 2019 15:54:08 +0100 + +waitress (1.2.0~b2-1) unstable; urgency=medium + + [ Ondřej Nový ] + * d/copyright: Use https protocol in Format field. + * d/control: Add Vcs-* field. + + [ Andrej Shadura ] + * New upstream release. + + -- Andrej Shadura Mon, 07 Jan 2019 18:26:54 +0100 + waitress (1.1.0-1) unstable; urgency=medium * New upstream release. 
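The 1.2.0b1 changelog above describes several new proxy-related ``serve()`` options (``trusted_proxy_count``, ``trusted_proxy_headers``, ``clear_untrusted_proxy_headers``, ``log_untrusted_proxy_headers``). The following is a minimal sketch of how they might be combined; ``myapp.wsgiapp`` and the proxy address ``203.0.113.10`` are placeholders, and because the defaults remain backwards compatible for now, the options are set explicitly.

.. code-block:: python

    from waitress import serve

    from myapp import wsgiapp  # hypothetical import; substitute your WSGI application

    # Sketch only: trust exactly one upstream proxy (203.0.113.10 is a
    # placeholder address) and only the headers it is expected to send,
    # clearing anything else so the WSGI application never sees untrusted
    # proxy headers.
    serve(
        wsgiapp,
        listen='127.0.0.1:8080',
        trusted_proxy='203.0.113.10',
        trusted_proxy_count=1,
        trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto'},
        clear_untrusted_proxy_headers=True,
        log_untrusted_proxy_headers=True,
    )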
diff -Nru waitress-1.1.0/debian/control waitress-1.2.0~b2/debian/control --- waitress-1.1.0/debian/control 2018-05-13 08:12:31.000000000 +0000 +++ waitress-1.2.0~b2/debian/control 2019-01-08 14:54:08.000000000 +0000 @@ -12,8 +12,8 @@ python-sphinx (>= 1.0.7+dfsg) | python3-sphinx, debhelper (>= 9) Standards-Version: 3.9.5 -Vcs-Git: https://git.dgit.debian.org/waitress -Vcs-Browser: https://browse.dgit.debian.org/waitress.git/ +Vcs-Browser: https://salsa.debian.org/debian/waitress +Vcs-Git: https://salsa.debian.org/debian/waitress.git Testsuite: autopkgtest-pkg-python Package: python-waitress diff -Nru waitress-1.1.0/debian/copyright waitress-1.2.0~b2/debian/copyright --- waitress-1.1.0/debian/copyright 2018-05-13 08:12:31.000000000 +0000 +++ waitress-1.2.0~b2/debian/copyright 2019-01-08 14:54:08.000000000 +0000 @@ -1,4 +1,4 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Chris McDonough Upstream-Contact: chrism@plope.com Source: http://github.com/Pylons/waitress diff -Nru waitress-1.1.0/debian/patches/01-fix-sphinxdoc-conf.patch waitress-1.2.0~b2/debian/patches/01-fix-sphinxdoc-conf.patch --- waitress-1.1.0/debian/patches/01-fix-sphinxdoc-conf.patch 2018-05-13 08:12:31.000000000 +0000 +++ waitress-1.2.0~b2/debian/patches/01-fix-sphinxdoc-conf.patch 2019-01-08 14:54:08.000000000 +0000 @@ -5,14 +5,14 @@ +++ b/docs/conf.py @@ -19,7 +19,7 @@ - import sys, os + import datetime import pkg_resources -import pylons_sphinx_themes +# import pylons_sphinx_themes # General configuration # --------------------- -@@ -47,7 +47,7 @@ +@@ -53,7 +53,7 @@ # other places throughout the built documents. # # The short X.Y version. @@ -21,16 +21,37 @@ # The full version, including alpha/beta/rc tags. release = version -@@ -89,9 +89,9 @@ +@@ -98,9 +98,9 @@ # ----------------------- # Add and use Pylons theme -html_theme = 'pylons' -html_theme_path = pylons_sphinx_themes.get_html_themes_path() --html_theme_options = dict(github_url='http://github.com/Pylons/waitress') +-html_theme_options = dict(github_url='https://github.com/Pylons/waitress') +# html_theme = 'pylons' -+# html_theme_path = # pylons_sphinx_themes.get_html_themes_path() -+# html_theme_options = dict(github_url='http://github.com/Pylons/waitress') ++# html_theme_path = pylons_sphinx_themes.get_html_themes_path() ++# html_theme_options = dict(github_url='https://github.com/Pylons/waitress') # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths +@@ -170,13 +170,13 @@ + htmlhelp_basename = 'waitress' + + # Control display of sidebars +-html_sidebars = {'**': [ +- 'localtoc.html', +- 'ethicalads.html', +- 'relations.html', +- 'sourcelink.html', +- 'searchbox.html', +-]} ++#html_sidebars = {'**': [ ++# 'localtoc.html', ++# 'ethicalads.html', ++# 'relations.html', ++# 'sourcelink.html', ++# 'searchbox.html', ++#]} + + # Options for LaTeX output + # ------------------------ diff -Nru waitress-1.1.0/debian/README.Debian waitress-1.2.0~b2/debian/README.Debian --- waitress-1.1.0/debian/README.Debian 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/debian/README.Debian 2019-01-08 14:54:08.000000000 +0000 @@ -0,0 +1 @@ +The maintainer of this package uses dgit. Please consider using dgit push to upload it. 
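The ``01-fix-sphinxdoc-conf.patch`` above unbreaks the documentation build by commenting out the Pylons Sphinx theme settings. A hypothetical alternative, shown only as an illustration and not part of the actual patch or of upstream ``docs/conf.py``, would be a conditional fallback:

.. code-block:: python

    # Illustration only: fall back to a theme bundled with Sphinx when the
    # pylons_sphinx_themes package is not installed, instead of commenting
    # the theme settings out.
    try:
        import pylons_sphinx_themes

        html_theme = 'pylons'
        html_theme_path = pylons_sphinx_themes.get_html_themes_path()
        html_theme_options = dict(github_url='https://github.com/Pylons/waitress')
    except ImportError:
        html_theme = 'alabaster'  # shipped with Sphinx, no extra dependency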
diff -Nru waitress-1.1.0/docs/arguments.rst waitress-1.2.0~b2/docs/arguments.rst --- waitress-1.1.0/docs/arguments.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/arguments.rst 2019-01-02 20:46:09.000000000 +0000 @@ -7,7 +7,7 @@ in :term:`PasteDeploy` configuration (interchangeably): host - hostname or IP address (string) on which to listen, default ``0.0.0.0``, + Hostname or IP address (string) on which to listen, default ``0.0.0.0``, which means "all IP addresses on this host". .. warning:: @@ -20,18 +20,18 @@ May not be used with ``listen`` listen - Tell waitress to listen on an host/port combination. It is to be provided - as a space delineated list of host/port: + Tell waitress to listen on combinations of ``host:port`` arguments. + Combinations should be a quoted, space-delimited list, as in the following examples. - Examples: + .. code-block:: python - - ``listen="127.0.0.1:8080 [::1]:8080"`` - - ``listen="*:8080 *:6543"`` + listen="127.0.0.1:8080 [::1]:8080" + listen="*:8080 *:6543" - A wildcard for the hostname is also supported and will bind to both - IPv4/IPv6 depending on whether they are enabled or disabled. + A wildcard for the hostname is also supported and will bind to both + IPv4/IPv6 depending on whether they are enabled or disabled. - IPv6 IP addresses are supported by surrounding the IP address with brackets. + IPv6 IP addresses are supported by surrounding the IP address with brackets. .. versionadded:: 1.0 @@ -42,66 +42,171 @@ Enable or disable IPv6 (boolean) unix_socket - Path of Unix socket (string), default is ``None``. If a socket path is - specified, a Unix domain socket is made instead of the usual inet domain - socket. + Path of Unix socket (string). If a socket path is specified, a Unix domain + socket is made instead of the usual inet domain socket. Not available on Windows. + Default: ``None`` + unix_socket_perms - Octal permissions to use for the Unix domain socket (string), default is - ``600``. Only used if ``unix_socket`` is not ``None``. + Octal permissions to use for the Unix domain socket (string). + Only used if ``unix_socket`` is not ``None``. + + Default: ``'600'`` + +sockets + A list of sockets. The sockets can be either Internet or UNIX sockets and have + to be bound. Internet and UNIX sockets cannot be mixed. + If the socket list is not empty, waitress creates one server for each socket. + + Default: ``[]`` + + .. versionadded:: 1.1.1 + + .. warning:: + May not be used with ``listen``, ``host``, ``port`` or ``unix_socket`` threads - number of threads used to process application logic (integer), default - ``4`` + The number of threads used to process application logic (integer). + + Default: ``4`` trusted_proxy - IP address of a client allowed to override ``url_scheme`` via the - ``X_FORWARDED_PROTO`` header. + IP address of a remote peer allowed to override various WSGI environment + variables using proxy headers. + + For unix sockets, set this value to ``localhost`` instead of an IP address. + + Default: ``None`` + +trusted_proxy_count + How many proxies we trust when chained. For example, + + ``X-Forwarded-For: 192.0.2.1, "[2001:db8::1]"`` + + or + + ``Forwarded: for=192.0.2.1, For="[2001:db8::1]"`` + + means there were (potentially), two proxies involved. If we know there is + only 1 valid proxy, then that initial IP address "192.0.2.1" is not trusted + and we completely ignore it. + + If there are two trusted proxies in the path, this value should be set to + 2. If there are more proxies, this value should be set higher. 
+ + Default: ``1`` + + .. versionadded:: 1.2.0 + +trusted_proxy_headers + Which of the proxy headers should we trust, this is a set where you + either specify "forwarded" or one or more of "x-forwarded-host", "x-forwarded-for", + "x-forwarded-proto", "x-forwarded-port", "x-forwarded-by". + + This list of trusted headers is used when ``trusted_proxy`` is set and will + allow waitress to modify the WSGI environment using the values provided by + the proxy. + + .. versionadded:: 1.2.0 + + .. warning:: + If ``trusted_proxy`` is set, the default is ``x-forwarded-proto`` to + match older versions of Waitress. Users should explicitly opt-in by + selecting the headers to be trusted as future versions of waitress will + use an empty default. + + .. warning:: + It is an error to set this value without setting ``trusted_proxy``. + +log_untrusted_proxy_headers + Should waitress log warning messages about proxy headers that are being + sent from upstream that are not trusted by ``trusted_proxy_headers`` but + are being cleared due to ``clear_untrusted_proxy_headers``? + + This may be useful for debugging if you expect your upstream proxy server + to only send specific headers. + + Default: ``False`` + + .. versionadded:: 1.2.0 + + .. warning:: + It is a no-op to set this value without also setting + ``clear_untrusted_proxy_headers`` and ``trusted_proxy`` + +clear_untrusted_proxy_headers + This tells Waitress to remove any untrusted proxy headers ("Forwarded", + "X-Forwared-For", "X-Forwarded-By", "X-Forwarded-Host", "X-Forwarded-Port", + "X-Forwarded-Proto") not explicitly allowed by ``trusted_proxy_headers``. + + Default: ``False`` + + .. versionadded:: 1.2.0 + + .. warning:: + The default value is set to ``False`` for backwards compatibility. In + future versions of Waitress this default will be changed to ``True``. + Warnings will be raised unless the user explicitly provides a value for + this option, allowing the user to opt-in to the new safety features + automatically. + + .. warning:: + It is an error to set this value without setting ``trusted_proxy``. url_scheme - default ``wsgi.url_scheme`` value (string), default ``http``; can be + The value of ``wsgi.url_scheme`` in the environ. This can be overridden per-request by the value of the ``X_FORWARDED_PROTO`` header, but only if the client address matches ``trusted_proxy``. + Default: ``http`` + ident - server identity (string) used in "Server:" header in responses, default - ``waitress`` + Server identity (string) used in "Server:" header in responses. + + Default: ``waitress`` backlog - backlog is the value waitress passes to pass to socket.listen() - (integer), default ``1024``. This is the maximum number of incoming TCP + The value waitress passes to pass to ``socket.listen()`` (integer). + This is the maximum number of incoming TCP connections that will wait in an OS queue for an available channel. From listen(1): "If a connection request arrives when the queue is full, the client may receive an error with an indication of ECONNREFUSED or, if the underlying protocol supports retransmission, the request may be ignored so that a later reattempt at connection succeeds." + Default: ``1024`` + recv_bytes - recv_bytes is the argument waitress passes to socket.recv() (integer), - default ``8192`` + The argument waitress passes to ``socket.recv()`` (integer). + + Default: ``8192`` send_bytes - send_bytes is the number of bytes to send to socket.send() (integer), - default ``18000``. 
Multiples of 9000 should avoid partly-filled TCP + The number of bytes to send to ``socket.send()`` (integer). + Multiples of 9000 should avoid partly-filled TCP packets, but don't set this larger than the TCP write buffer size. In - Linux, /proc/sys/net/ipv4/tcp_wmem controls the minimum, default, and + Linux, ``/proc/sys/net/ipv4/tcp_wmem`` controls the minimum, default, and maximum sizes of TCP write buffers. + Default: ``18000`` + outbuf_overflow A tempfile should be created if the pending output is larger than - outbuf_overflow, which is measured in bytes. The default is 1MB - (``1048576``). This is conservative. + outbuf_overflow, which is measured in bytes. The default is conservative. + + Default: ``1048576`` (1MB) inbuf_overflow A tempfile should be created if the pending input is larger than - inbuf_overflow, which is measured in bytes. The default is 512K - (``524288``). This is conservative. + inbuf_overflow, which is measured in bytes. The default is conservative. + + Default: ``524288`` (512K) connection_limit Stop creating new channels if too many are already active (integer). - Default is ``100``. Each channel consumes at least one file descriptor, + Each channel consumes at least one file descriptor, and, depending on the input and output body sizes, potentially up to three, plus whatever file descriptors your application logic happens to open. The default is conservative, but you may need to increase the @@ -111,45 +216,62 @@ connections that can be waiting for processing; the ``backlog`` argument controls that. + Default: ``100`` + cleanup_interval - Minimum seconds between cleaning up inactive channels (integer), default - ``30``. See "channel_timeout". + Minimum seconds between cleaning up inactive channels (integer). + See also ``channel_timeout``. + + Default: ``30`` channel_timeout - Maximum seconds to leave an inactive connection open (integer), default - ``120``. "Inactive" is defined as "has received no data from a client + Maximum seconds to leave an inactive connection open (integer). + "Inactive" is defined as "has received no data from a client and has sent no data to a client". + Default: ``120`` + log_socket_errors - Boolean: turn off to not log premature client disconnect tracebacks. - Default: ``True``. + Set to ``False`` to not log premature client disconnect tracebacks. + + Default: ``True`` max_request_header_size - maximum number of bytes of all request headers combined (integer), 256K - (``262144``) default) + Maximum number of bytes of all request headers combined (integer). + + Default: ``262144`` (256K) max_request_body_size - maximum number of bytes in request body (integer), 1GB (``1073741824``) - default. + Maximum number of bytes in request body (integer). + + Default: ``1073741824`` (1GB) expose_tracebacks - Boolean: expose tracebacks of unhandled exceptions to client. Default: - ``False``. + Set to ``True`` to expose tracebacks of unhandled exceptions to client. + + Default: ``False`` asyncore_loop_timeout - The ``timeout`` value (seconds) passed to ``asyncore.loop`` to run the - mainloop. Default: 1. (New in 0.8.3.) + The ``timeout`` value (seconds) passed to ``asyncore.loop`` to run the mainloop. + + Default: ``1`` + + .. versionadded:: 0.8.3 asyncore_use_poll - Boolean: switch from using select() to poll() in ``asyncore.loop``. - By default asyncore.loop() uses select() which has a limit of 1024 - file descriptors. Select() and poll() provide basically the same - functionality, but poll() doesn't have the file descriptors limit. 
- Default: False (New in 0.8.6) + Set to ``True`` to switch from using ``select()`` to ``poll()`` in ``asyncore.loop``. + By default ``asyncore.loop()`` uses ``select()`` which has a limit of 1024 file descriptors. + ``select()`` and ``poll()`` provide basically the same functionality, but ``poll()`` doesn't have the file descriptors limit. + + Default: ``False`` + + .. versionadded:: 0.8.6 url_prefix String: the value used as the WSGI ``SCRIPT_NAME`` value. Setting this to anything except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be the value passed minus any trailing slashes you add, and it will cause the ``PATH_INFO`` of any request which is prefixed with this value to - be stripped of the prefix. Default: the empty string. + be stripped of the prefix. + + Default: ``''`` diff -Nru waitress-1.1.0/docs/conf.py waitress-1.2.0~b2/docs/conf.py --- waitress-1.1.0/docs/conf.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/conf.py 2019-01-02 20:46:09.000000000 +0000 @@ -17,7 +17,7 @@ # make it absolute, like shown here. #sys.path.append(os.path.abspath('some/directory')) -import sys, os +import datetime import pkg_resources import pylons_sphinx_themes @@ -28,7 +28,12 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', - ] + 'sphinx.ext.intersphinx', +] + +intersphinx_mapping = { + 'python': ('https://docs.python.org/3/', None), +} # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -41,7 +46,8 @@ # General substitutions. project = 'waitress' -copyright = '2012, Agendaless Consulting ' +thisyear = datetime.datetime.now().year +copyright = '2012-%s, Agendaless Consulting ' % thisyear # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. @@ -84,6 +90,9 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' +# Do not use smart quotes. +smartquotes = False + # Options for HTML output # ----------------------- @@ -91,7 +100,7 @@ # Add and use Pylons theme html_theme = 'pylons' html_theme_path = pylons_sphinx_themes.get_html_themes_path() -html_theme_options = dict(github_url='http://github.com/Pylons/waitress') +html_theme_options = dict(github_url='https://github.com/Pylons/waitress') # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths @@ -158,8 +167,16 @@ #html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'atemplatedoc' +htmlhelp_basename = 'waitress' +# Control display of sidebars +html_sidebars = {'**': [ + 'localtoc.html', + 'ethicalads.html', + 'relations.html', + 'sourcelink.html', + 'searchbox.html', +]} # Options for LaTeX output # ------------------------ @@ -175,7 +192,7 @@ # author, document class [howto/manual]). latex_documents = [ ('index', 'waitress.tex', 'waitress Documentation', - 'Pylons Developers', 'manual'), + 'Pylons Project Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the diff -Nru waitress-1.1.0/docs/design.rst waitress-1.2.0~b2/docs/design.rst --- waitress-1.1.0/docs/design.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/design.rst 2019-01-02 20:46:09.000000000 +0000 @@ -1,11 +1,19 @@ Design ------ -Waitress uses a combination of asynchronous and synchronous code to do its -job. 
It handles I/O to and from clients using the :term:`asyncore` library. +Waitress uses a combination of asynchronous and synchronous code to do its job. +It handles I/O to and from clients using the :term:`wasyncore`, which is :term:`asyncore` vendored into Waitress. It services requests via threads. -The :term:`asyncore` module in the Python standard library: +.. note:: + :term:`asyncore` has been deprecated since Python 3.6. + Work continues on its inevitable removal from the Python standard library. + Its recommended replacement is :mod:`asyncio`. + + Although :term:`asyncore` has been vendored into Waitress as :term:`wasyncore`, you may see references to "asyncore" in this documentation's code examples and API. + The terms are effectively the same and may be used interchangeably. + +The :term:`wasyncore` module: - Uses the ``select.select`` function to wait for connections from clients and determine if a connected client is ready to receive output. @@ -37,10 +45,12 @@ threads are in use, scheduled tasks will wait in a queue for a worker thread to become available. -I/O is always done asynchronously (by asyncore) in the main thread. Worker -threads never do any I/O. This means that 1) a large number of clients can -be connected to the server at once and 2) worker threads will never be hung -up trying to send data to a slow client. +I/O is always done asynchronously (by :term:`wasyncore`) in the main thread. +Worker threads never do any I/O. +This means that + +#. a large number of clients can be connected to the server at once, and +#. worker threads will never be hung up trying to send data to a slow client. No attempt is made to kill a "hung thread". It's assumed that when a task (application logic) starts that it will eventually complete. If for some diff -Nru waitress-1.1.0/docs/filewrapper.rst waitress-1.2.0~b2/docs/filewrapper.rst --- waitress-1.1.0/docs/filewrapper.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/filewrapper.rst 2019-01-02 20:46:09.000000000 +0000 @@ -1,9 +1,7 @@ Support for ``wsgi.file_wrapper`` --------------------------------- -Waitress supports the `WSGI file_wrapper protocol -`_ -. Here's a usage example: +Waitress supports the Python Web Server Gateway Interface v1.0 as specified in :pep:`3333`. Here's a usage example: .. code-block:: python diff -Nru waitress-1.1.0/docs/glossary.rst waitress-1.2.0~b2/docs/glossary.rst --- waitress-1.1.0/docs/glossary.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/glossary.rst 2019-01-02 20:46:09.000000000 +0000 @@ -4,12 +4,31 @@ ======== .. glossary:: - :sorted: + :sorted: - PasteDeploy - A system for configuration of WSGI web components in declarative - ``.ini`` format. See http://pythonpaste.org/deploy/. - - asyncore - A standard library module for asynchronous communications. See - http://docs.python.org/library/asyncore.html . + PasteDeploy + A system for configuration of WSGI web components in declarative ``.ini`` format. + See https://web.archive.org/web/20161029233359/http://pythonpaste.org/deploy/. + + asyncore + A Python standard library module for asynchronous communications. See :mod:`asyncore`. + + .. versionchanged:: 1.2.0 + Waitress has now "vendored" ``asyncore`` into itself as ``waitress.wasyncore``. + This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. + + middleware + *Middleware* is a :term:`WSGI` concept. + It is a WSGI component that acts both as a server and an application. 
+ Interesting uses for middleware exist, such as caching, content-transport encoding, and other functions. + See `WSGI.org `_ or `PyPI `_ to find middleware for your application. + + WSGI + `Web Server Gateway Interface `_. + This is a Python standard for connecting web applications to web servers, similar to the concept of Java Servlets. + Waitress requires that your application be served as a WSGI application. + + wasyncore + .. versionchanged:: 1.2.0 + Waitress has now "vendored" :term:`asyncore` into itself as ``waitress.wasyncore``. + This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. diff -Nru waitress-1.1.0/docs/index.rst waitress-1.2.0~b2/docs/index.rst --- waitress-1.1.0/docs/index.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/index.rst 2019-01-02 20:46:09.000000000 +0000 @@ -1,298 +1,15 @@ +.. _index: + +======== Waitress --------- +======== Waitress is meant to be a production-quality pure-Python WSGI server with very acceptable performance. It has no dependencies except ones which live in the Python standard library. It runs on CPython on Unix and Windows under -Python 2.7+ and Python 3.3+. It is also known to run on PyPy 1.6.0 on UNIX. +Python 2.7+ and Python 3.4+. It is also known to run on PyPy 1.6.0 on UNIX. It supports HTTP/1.0 and HTTP/1.1. -Usage ------ - -Here's normal usage of the server: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp, listen='*:8080') - -This will run waitress on port 8080 on all available IP addresses, both IPv4 -and IPv6. - - -.. code-block:: python - - from waitress import serve - serve(wsgiapp, host='0.0.0.0', port=8080) - -This will run waitress on port 8080 on all available IPv4 addresses. - -If you want to serve your application on all IP addresses, on port 8080, you -can omit the ``host`` and ``port`` arguments and just call ``serve`` with the -WSGI app as a single argument: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp) - -Press Ctrl-C (or Ctrl-Break on Windows) to exit the server. - -The default is to bind to any IPv4 address on port 8080: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp) - -If you want to serve your application through a UNIX domain socket (to serve -a downstream HTTP server/proxy, e.g. nginx, lighttpd, etc.), call ``serve`` -with the ``unix_socket`` argument: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp, unix_socket='/path/to/unix.sock') - -Needless to say, this configuration won't work on Windows. - -Exceptions generated by your application will be shown on the console by -default. See :ref:`logging` to change this. - -There's an entry point for :term:`PasteDeploy` (``egg:waitress#main``) that -lets you use Waitress's WSGI gateway from a configuration file, e.g.: - -.. code-block:: ini - - [server:main] - use = egg:waitress#main - listen = 127.0.0.1:8080 - -Using ``host`` and ``port`` is also supported: - -.. code-block:: ini - - [server:main] - host = 127.0.0.1 - port = 8080 - -The :term:`PasteDeploy` syntax for UNIX domain sockets is analagous: - -.. code-block:: ini - - [server:main] - use = egg:waitress#main - unix_socket = /path/to/unix.sock - -You can find more settings to tweak (arguments to ``waitress.serve`` or -equivalent settings in PasteDeploy) in :ref:`arguments`. 
- -Additionally, there is a command line runner called ``waitress-serve``, which -can be used in development and in situations where the likes of -:term:`PasteDeploy` is not necessary: - -.. code-block:: bash - - # Listen on both IPv4 and IPv6 on port 8041 - waitress-serve --listen=*:8041 myapp:wsgifunc - - # Listen on only IPv4 on port 8041 - waitress-serve --port=8041 myapp:wsgifunc - -For more information on this, see :ref:`runner`. - -.. _logging: - -Logging -------- - -``waitress.serve`` calls ``logging.basicConfig()`` to set up logging to the -console when the server starts up. Assuming no other logging configuration -has already been done, this sets the logging default level to -``logging.WARNING``. The Waitress logger will inherit the root logger's -level information (it logs at level ``WARNING`` or above). - -Waitress sends its logging output (including application exception -renderings) to the Python logger object named ``waitress``. You can -influence the logger level and output stream using the normal Python -``logging`` module API. For example: - -.. code-block:: python - - import logging - logger = logging.getLogger('waitress') - logger.setLevel(logging.INFO) - -Within a PasteDeploy configuration file, you can use the normal Python -``logging`` module ``.ini`` file format to change similar Waitress logging -options. For example: - -.. code-block:: ini - - [logger_waitress] - level = INFO - -Using Behind a Reverse Proxy ----------------------------- - -Often people will set up "pure Python" web servers behind reverse proxies, -especially if they need SSL support (Waitress does not natively support SSL). -Even if you don't need SSL support, it's not uncommon to see Waitress and -other pure-Python web servers set up to "live" behind a reverse proxy; these -proxies often have lots of useful deployment knobs. - -If you're using Waitress behind a reverse proxy, you'll almost always want -your reverse proxy to pass along the ``Host`` header sent by the client to -Waitress, in either case, as it will be used by most applications to generate -correct URLs. - -For example, when using Nginx as a reverse proxy, you might add the following -lines in a ``location`` section:: - - proxy_set_header Host $host; - -The Apache directive named ``ProxyPreserveHost`` does something similar when -used as a reverse proxy. - -Unfortunately, even if you pass the ``Host`` header, the Host header does not -contain enough information to regenerate the original URL sent by the client. -For example, if your reverse proxy accepts HTTPS requests (and therefore URLs -which start with ``https://``), the URLs generated by your application when -used behind a reverse proxy served by Waitress might inappropriately be -``http://foo`` rather than ``https://foo``. To fix this, you'll want to -change the ``wsgi.url_scheme`` in the WSGI environment before it reaches your -application. You can do this in one of three ways: - -1. You can pass a ``url_scheme`` configuration variable to the - ``waitress.serve`` function. - -2. You can configure the proxy reverse server to pass a header, - ``X_FORWARDED_PROTO``, whose value will be set for that request as - the ``wsgi.url_scheme`` environment value. Note that you must also - conigure ``waitress.serve`` by passing the IP address of that proxy - as its ``trusted_proxy``. - -3. You can use Paste's ``PrefixMiddleware`` in conjunction with - configuration settings on the reverse proxy server. 
- -Using ``url_scheme`` to set ``wsgi.url_scheme`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can have the Waitress server use the ``https`` url scheme by default.: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp, listen='0.0.0.0:8080', url_scheme='https') - -This works if all URLs generated by your application should use the ``https`` -scheme. - -Passing the ``X_FORWARDED_PROTO`` header to set ``wsgi.url_scheme`` -------------------------------------------------------------------- - -If your proxy accepts both HTTP and HTTPS URLs, and you want your application -to generate the appropriate url based on the incoming scheme, also set up -your proxy to send a ``X-Forwarded-Proto`` with the original URL scheme along -with each proxied request. For example, when using Nginx:: - - proxy_set_header X-Forwarded-Proto $scheme; - -or via Apache:: - - RequestHeader set X-Forwarded-Proto https - -.. note:: - - You must also configure the Waitress server's ``trusted_proxy`` to - contain the IP address of the proxy in order for this header to override - the default URL scheme. - -Using ``url_prefix`` to influence ``SCRIPT_NAME`` and ``PATH_INFO`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can have the Waitress server use a particular url prefix by default for all -URLs generated by downstream applications that take ``SCRIPT_NAME`` into -account.: - -.. code-block:: python - - from waitress import serve - serve(wsgiapp, listen='0.0.0.0:8080', url_prefix='/foo') - -Setting this to any value except the empty string will cause the WSGI -``SCRIPT_NAME`` value to be that value, minus any trailing slashes you add, and -it will cause the ``PATH_INFO`` of any request which is prefixed with this -value to be stripped of the prefix. This is useful in proxying scenarios where -you wish to forward all traffic to a Waitress server but need URLs generated by -downstream applications to be prefixed with a particular path segment. - -Using Paste's ``PrefixMiddleware`` to set ``wsgi.url_scheme`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If only some of the URLs generated by your application should use the -``https`` scheme (and some should use ``http``), you'll need to use Paste's -``PrefixMiddleware`` as well as change some configuration settings on your -proxy. To use ``PrefixMiddleware``, wrap your application before serving it -using Waitress: - -.. code-block:: python - - from waitress import serve - from paste.deploy.config import PrefixMiddleware - app = PrefixMiddleware(app) - serve(app) - -Once you wrap your application in the the ``PrefixMiddleware``, the -middleware will notice certain headers sent from your proxy and will change -the ``wsgi.url_scheme`` and possibly other WSGI environment variables -appropriately. - -Once your application is wrapped by the prefix middleware, you should -instruct your proxy server to send along the original ``Host`` header from -the client to your Waitress server, as well as sending along a -``X-Forwarded-Proto`` header with the appropriate value for -``wsgi.url_scheme``. - -If your proxy accepts both HTTP and HTTPS URLs, and you want your application -to generate the appropriate url based on the incoming scheme, also set up -your proxy to send a ``X-Forwarded-Proto`` with the original URL scheme along -with each proxied request. 
For example, when using Nginx:: - - proxy_set_header X-Forwarded-Proto $scheme; - -It's permitted to set an ``X-Forwarded-For`` header too; the -``PrefixMiddleware`` uses this to adjust other environment variables (you'll -have to read its docs to find out which ones, I don't know what they are). For -the ``X-Forwarded-For`` header:: - - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - -Note that you can wrap your application in the PrefixMiddleware declaratively -in a :term:`PasteDeploy` configuration file too, if your web framework uses -PasteDeploy-style configuration: - -.. code-block:: ini - - [app:myapp] - use = egg:mypackage#myapp - - [filter:paste_prefix] - use = egg:PasteDeploy#prefix - - [pipeline:main] - pipeline = - paste_prefix - myapp - - [server:main] - use = egg:waitress#main - listen = 127.0.0.1:8080 - -Note that you can also set ``PATH_INFO`` and ``SCRIPT_NAME`` using -PrefixMiddleware too (its original purpose, really) instead of using Waitress' -``url_prefix`` adjustment. See the PasteDeploy docs for more information. Extended Documentation ---------------------- @@ -300,13 +17,17 @@ .. toctree:: :maxdepth: 1 - design.rst - differences.rst - api.rst - arguments.rst - filewrapper.rst - runner.rst - glossary.rst + usage + logging + reverse-proxy + design + differences + api + arguments + filewrapper + runner + socket-activation + glossary Change History -------------- @@ -317,33 +38,31 @@ Known Issues ------------ -- Does not support SSL natively. +- Does not support TLS natively. See :ref:`using-behind-a-reverse-proxy` for more information. Support and Development ----------------------- -The `Pylons Project web site `_ is the main online +The `Pylons Project web site `_ is the main online source of Waitress support and development information. To report bugs, use the `issue tracker -`_. +`_. If you've got questions that aren't answered by this documentation, -contact the `Pylons-devel maillist -`_ or join the `#pyramid -IRC channel `_. +contact the `Pylons-discuss maillist +`_ or join the `#pyramid +IRC channel `_. Browse and check out tagged and trunk versions of Waitress via -the `Waitress GitHub repository `_. +the `Waitress GitHub repository `_. To check out the trunk via ``git``, use this command: .. code-block:: text git clone git@github.com:Pylons/waitress.git -To find out how to become a contributor to Waitress, please see the -`contributor's section of the documentation -`_. +To find out how to become a contributor to Waitress, please see the guidelines in `contributing.md `_ and `How to Contribute Source Code and Documentation `_. Why? ---- @@ -373,7 +92,7 @@ suite of the CherryPy server also depends on the CherryPy web framework, so even if we forked its server component into a separate distribution, we would have still needed to backfill for all of its tests. The CherryPy team has -started work on `Cheroot `_, which +started work on `Cheroot `_, which should solve this problem, however. Waitress is a fork of the WSGI-related components which existed in diff -Nru waitress-1.1.0/docs/logging.rst waitress-1.2.0~b2/docs/logging.rst --- waitress-1.1.0/docs/logging.rst 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/docs/logging.rst 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,190 @@ +.. _access-logging: + +============== +Access Logging +============== + +The WSGI design is modular. Waitress logs error conditions, debugging +output, etc., but not web traffic. For web traffic logging, Paste +provides `TransLogger +`_ +:term:`middleware`. 
TransLogger produces logs in the `Apache Combined +Log Format `_. + + +.. _logging-to-the-console-using-python: + +Logging to the Console Using Python +----------------------------------- + +``waitress.serve`` calls ``logging.basicConfig()`` to set up logging to the +console when the server starts up. Assuming no other logging configuration +has already been done, this sets the logging default level to +``logging.WARNING``. The Waitress logger will inherit the root logger's +level information (it logs at level ``WARNING`` or above). + +Waitress sends its logging output (including application exception +renderings) to the Python logger object named ``waitress``. You can +influence the logger level and output stream using the normal Python +``logging`` module API. For example: + +.. code-block:: python + + import logging + logger = logging.getLogger('waitress') + logger.setLevel(logging.INFO) + +Within a PasteDeploy configuration file, you can use the normal Python +``logging`` module ``.ini`` file format to change similar Waitress logging +options. For example: + +.. code-block:: ini + + [logger_waitress] + level = INFO + + +.. _logging-to-the-console-using-pastedeploy: + +Logging to the Console Using PasteDeploy +---------------------------------------- + +TransLogger will automatically setup a logging handler to the console when called with no arguments. +It "just works" in environments that don't configure logging. +This is by virtue of its default configuration setting of ``setup_console_handler = True``. + + +.. TODO: +.. .. _logging-to-a-file-using-python: + +.. Logging to a File Using Python +.. ------------------------------ + +.. Show how to configure the WSGI logger via python. + + +.. _logging-to-a-file-using-pastedeploy: + +Logging to a File Using PasteDeploy +------------------------------------ + +TransLogger does not write to files, and the Python logging system +must be configured to do this. The Python class :class:`FileHandler` +logging handler can be used alongside TransLogger to create an +``access.log`` file similar to Apache's. + +Like any standard :term:`middleware` with a Paste entry point, +TransLogger can be configured to wrap your application using ``.ini`` +file syntax. First add a +``[filter:translogger]`` section, then use a ``[pipeline:main]`` +section file to form a WSGI pipeline with both the translogger and +your application in it. For instance, if you have this: + +.. code-block:: ini + + [app:wsgiapp] + use = egg:mypackage#wsgiapp + + [server:main] + use = egg:waitress#main + host = 127.0.0.1 + port = 8080 + +Add this: + +.. code-block:: ini + + [filter:translogger] + use = egg:Paste#translogger + setup_console_handler = False + + [pipeline:main] + pipeline = translogger + wsgiapp + +Using PasteDeploy this way to form and serve a pipeline is equivalent to +wrapping your app in a TransLogger instance via the bottom of the ``main`` +function of your project's ``__init__`` file: + +.. code-block:: python + + from mypackage import wsgiapp + from waitress import serve + from paste.translogger import TransLogger + serve(TransLogger(wsgiapp, setup_console_handler=False)) + +.. note:: + TransLogger will automatically set up a logging handler to the console when + called with no arguments, so it "just works" in environments that don't + configure logging. Since our logging handlers are configured, we disable + the automation via ``setup_console_handler = False``. 
+ +With the filter in place, TransLogger's logger (named the ``wsgi`` logger) will +propagate its log messages to the parent logger (the root logger), sending +its output to the console when we request a page: + +.. code-block:: text + + 00:50:53,694 INFO [wsgiapp] Returning: Hello World! + (content-type: text/plain) + 00:50:53,695 INFO [wsgi] 192.168.1.111 - - [11/Aug/2011:20:09:33 -0700] "GET /hello + HTTP/1.1" 404 - "-" + "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.6) Gecko/20070725 + Firefox/2.0.0.6" + +To direct TransLogger to an ``access.log`` FileHandler, we need the +following to add a FileHandler (named ``accesslog``) to the list of +handlers, and ensure that the ``wsgi`` logger is configured and uses +this handler accordingly: + +.. code-block:: ini + + # Begin logging configuration + + [loggers] + keys = root, wsgiapp, wsgi + + [handlers] + keys = console, accesslog + + [logger_wsgi] + level = INFO + handlers = accesslog + qualname = wsgi + propagate = 0 + + [handler_accesslog] + class = FileHandler + args = ('%(here)s/access.log','a') + level = INFO + formatter = generic + +As mentioned above, non-root loggers by default propagate their log records +to the root logger's handlers (currently the console handler). Setting +``propagate`` to ``0`` (``False``) here disables this; so the ``wsgi`` logger +directs its records only to the ``accesslog`` handler. + +Finally, there's no need to use the ``generic`` formatter with +TransLogger, as TransLogger itself provides all the information we +need. We'll use a formatter that passes-through the log messages as +is. Add a new formatter called ``accesslog`` by including the +following in your configuration file: + +.. code-block:: ini + + [formatters] + keys = generic, accesslog + + [formatter_accesslog] + format = %(message)s + +Finally alter the existing configuration to wire this new +``accesslog`` formatter into the FileHandler: + +.. code-block:: ini + + [handler_accesslog] + class = FileHandler + args = ('%(here)s/access.log','a') + level = INFO + formatter = accesslog diff -Nru waitress-1.1.0/docs/reverse-proxy.rst waitress-1.2.0~b2/docs/reverse-proxy.rst --- waitress-1.1.0/docs/reverse-proxy.rst 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/docs/reverse-proxy.rst 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,132 @@ +.. index:: reverse, proxy, TLS, SSL, https + +.. _using-behind-a-reverse-proxy: + +============================ +Using Behind a Reverse Proxy +============================ + +Often people will set up "pure Python" web servers behind reverse proxies, +especially if they need TLS support (Waitress does not natively support TLS). +Even if you don't need TLS support, it's not uncommon to see Waitress and +other pure-Python web servers set up to "live" behind a reverse proxy; these +proxies often have lots of useful deployment knobs. + +If you're using Waitress behind a reverse proxy, you'll almost always want +your reverse proxy to pass along the ``Host`` header sent by the client to +Waitress, in either case, as it will be used by most applications to generate +correct URLs. You may also use the proxy headers if passing the Host directly +is not possible, or there are multiple proxies involved. + +For example, when using nginx as a reverse proxy, you might add the following +lines in a ``location`` section. + +.. code-block:: nginx + + proxy_set_header Host $host; + +The Apache directive named ``ProxyPreserveHost`` does something similar when +used as a reverse proxy. 
+ +Unfortunately, even if you pass the ``Host`` header, the Host header does not +contain enough information to regenerate the original URL sent by the client. +For example, if your reverse proxy accepts HTTPS requests (and therefore URLs +which start with ``https://``), the URLs generated by your application when +used behind a reverse proxy served by Waitress might inappropriately be +``http://foo`` rather than ``https://foo``. To fix this, you'll want to +change the ``wsgi.url_scheme`` in the WSGI environment before it reaches your +application. You can do this in one of three ways: + +1. You can pass a ``url_scheme`` configuration variable to the + ``waitress.serve`` function. + +2. You can pass certain well known proxy headers from your proxy server and + use waitress's ``trusted_proxy`` support to automatically configure the + WSGI environment. + +Using ``url_scheme`` to set ``wsgi.url_scheme`` +----------------------------------------------- + +You can have the Waitress server use the ``https`` url scheme by default.: + +.. code-block:: python + + from waitress import serve + serve(wsgiapp, listen='0.0.0.0:8080', url_scheme='https') + +This works if all URLs generated by your application should use the ``https`` +scheme. + +Passing the proxy headers to setup the WSGI environment +------------------------------------------------------- + +If your proxy accepts both HTTP and HTTPS URLs, and you want your application +to generate the appropriate url based on the incoming scheme, you'll want to +pass waitress ``X-Forwarded-Proto``, however Waitress is also able to update +the environment using ``X-Forwarded-Proto``, ``X-Forwarded-For``, +``X-Forwarded-Host``, and ``X-Forwarded-Port``:: + + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + +when using Apache, ``mod_proxy`` automatically forwards the following headers:: + + X-Forwarded-For + X-Forwarded-Host + X-Forwarded-Server + +You will also want to add to Apache:: + + RequestHeader set X-Forwarded-Proto https + +Configure waitress's ``trusted_proxy_headers`` as appropriate:: + + trusted_proxy_headers = "x-forwarded-for, x-forwarded-host, x-forwarded-proto, x-forwarded-port" + +At this point waitress will set up the WSGI environment using the information +specified in the trusted proxy headers. This will setup the following +variables:: + + HTTP_HOST + SERVER_NAME + SERVER_PORT + REMOTE_ADDR + REMOTE_PORT (if available) + wsgi.url_scheme + +Waitress also has support for the `Forwarded (RFC7239) HTTP header +`_ which is better defined than the ad-hoc +``X-Forwarded-*``, however support is not nearly as widespread yet. +``Forwarded`` supports similar functionality as the different individual +headers, and is mutually exclusive to using the ``X-Forwarded-*`` headers. + +To configure waitress to use the ``Forwarded`` header, set:: + + trusted_proxy_headers = "forwarded" + +.. note:: + + You must also configure the Waitress server's ``trusted_proxy`` to + contain the IP address of the proxy. + + +Using ``url_prefix`` to influence ``SCRIPT_NAME`` and ``PATH_INFO`` +------------------------------------------------------------------- + +You can have the Waitress server use a particular url prefix by default for all +URLs generated by downstream applications that take ``SCRIPT_NAME`` into +account.: + +.. 
code-block:: python + + from waitress import serve + serve(wsgiapp, listen='0.0.0.0:8080', url_prefix='/foo') + +Setting this to any value except the empty string will cause the WSGI +``SCRIPT_NAME`` value to be that value, minus any trailing slashes you add, and +it will cause the ``PATH_INFO`` of any request which is prefixed with this +value to be stripped of the prefix. This is useful in proxying scenarios where +you wish to forward all traffic to a Waitress server but need URLs generated by +downstream applications to be prefixed with a particular path segment. diff -Nru waitress-1.1.0/docs/runner.rst waitress-1.2.0~b2/docs/runner.rst --- waitress-1.1.0/docs/runner.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/docs/runner.rst 2019-01-02 20:46:09.000000000 +0000 @@ -3,14 +3,10 @@ waitress-serve -------------- -Waitress comes bundled with a thin command-line wrapper around the -``waitress.serve`` function called ``waitress-serve``. This is useful for -development, and in production situations where serving of static assets is -delegated to a reverse proxy, such as Nginx or Apache. +.. versionadded:: 0.8.4 -.. note:: - - This feature is new as of Waitress 0.8.4. + Waitress comes bundled with a thin command-line wrapper around the ``waitress.serve`` function called ``waitress-serve``. + This is useful for development, and in production situations where serving of static assets is delegated to a reverse proxy, such as nginx or Apache. ``waitress-serve`` takes the very same :ref:`arguments ` as the ``waitress.serve`` function, but where the function's arguments have @@ -159,7 +155,7 @@ this. Default is 524288 (512KB). ``--connection-limit=INT`` - Stop creating new channelse if too many are already active. Default is + Stop creating new channels if too many are already active. Default is 100. ``--cleanup-interval=INT`` @@ -168,11 +164,11 @@ ``--channel-timeout=INT`` Maximum number of seconds to leave inactive connections open. Default is - 120. 'Inactive' is defined as 'has recieved no data from the client and has + 120. 'Inactive' is defined as 'has received no data from the client and has sent no data to the client'. ``--[no-]log-socket-errors`` - Toggle whether premature client disconnect tracepacks ought to be logged. + Toggle whether premature client disconnect tracebacks ought to be logged. On by default. ``--max-request-header-size=INT`` diff -Nru waitress-1.1.0/docs/socket-activation.rst waitress-1.2.0~b2/docs/socket-activation.rst --- waitress-1.1.0/docs/socket-activation.rst 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/docs/socket-activation.rst 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,45 @@ +Socket Activation +----------------- + +While waitress does not support the various implementations of socket activation, +for example using systemd or launchd, it is prepared to receive pre-bound sockets +from init systems, process and socket managers, or other launchers that can provide +pre-bound sockets. + +The following shows a code example starting waitress with two pre-bound Internet sockets. + +.. 
code-block:: python
+
+    import socket
+    import waitress
+
+
+    def app(environ, start_response):
+        content_length = environ.get('CONTENT_LENGTH', None)
+        if content_length is not None:
+            content_length = int(content_length)
+        body = environ['wsgi.input'].read(content_length)
+        content_length = str(len(body))
+        start_response(
+            '200 OK',
+            [('Content-Length', content_length), ('Content-Type', 'text/plain')]
+        )
+        return [body]
+
+
+    if __name__ == '__main__':
+        sockets = [
+            socket.socket(socket.AF_INET, socket.SOCK_STREAM),
+            socket.socket(socket.AF_INET, socket.SOCK_STREAM)]
+        sockets[0].bind(('127.0.0.1', 8080))
+        sockets[1].bind(('127.0.0.1', 9090))
+        waitress.serve(app, sockets=sockets)
+        for sock in sockets:
+            sock.close()
+
+Generally, to implement socket activation for a given init system, a wrapper
+script uses the init-system-specific libraries to retrieve the sockets from
+the init system. Afterwards, it starts waitress, passing the sockets with the
+parameter ``sockets``. Note that the sockets have to be bound, which all init
+systems supporting socket activation do.
+
diff -Nru waitress-1.1.0/docs/usage.rst waitress-1.2.0~b2/docs/usage.rst
--- waitress-1.1.0/docs/usage.rst 1970-01-01 00:00:00.000000000 +0000
+++ waitress-1.2.0~b2/docs/usage.rst 2019-01-02 20:46:09.000000000 +0000
@@ -0,0 +1,83 @@
+.. _usage:
+
+=====
+Usage
+=====
+
+The following code will run waitress on port 8080 on all available IP addresses, both IPv4 and IPv6.
+
+.. code-block:: python
+
+    from waitress import serve
+    serve(wsgiapp, listen='*:8080')
+
+Press :kbd:`Ctrl-C` (or :kbd:`Ctrl-Break` on Windows) to exit the server.
+
+The following will run waitress on port 8080 on all available IPv4 addresses, but not IPv6.
+
+.. code-block:: python
+
+    from waitress import serve
+    serve(wsgiapp, host='0.0.0.0', port=8080)
+
+By default Waitress binds to any IPv4 address on port 8080.
+You can omit the ``host`` and ``port`` arguments and just call ``serve`` with the WSGI app as a single argument:
+
+.. code-block:: python
+
+    from waitress import serve
+    serve(wsgiapp)
+
+If you want to serve your application through a UNIX domain socket (to serve a downstream HTTP server/proxy such as nginx, lighttpd, and so on), call ``serve`` with the ``unix_socket`` argument:
+
+.. code-block:: python
+
+    from waitress import serve
+    serve(wsgiapp, unix_socket='/path/to/unix.sock')
+
+Needless to say, this configuration won't work on Windows.
+
+Exceptions generated by your application will be shown on the console by
+default. See :ref:`access-logging` to change this.
+
+There's an entry point for :term:`PasteDeploy` (``egg:waitress#main``) that
+lets you use Waitress's WSGI gateway from a configuration file, e.g.:
+
+.. code-block:: ini
+
+    [server:main]
+    use = egg:waitress#main
+    listen = 127.0.0.1:8080
+
+Using ``host`` and ``port`` is also supported:
+
+.. code-block:: ini
+
+    [server:main]
+    host = 127.0.0.1
+    port = 8080
+
+The :term:`PasteDeploy` syntax for UNIX domain sockets is analogous:
+
+.. code-block:: ini
+
+    [server:main]
+    use = egg:waitress#main
+    unix_socket = /path/to/unix.sock
+
+You can find more settings to tweak (arguments to ``waitress.serve`` or
+equivalent settings in PasteDeploy) in :ref:`arguments`.
+
+Additionally, there is a command line runner called ``waitress-serve``, which
+can be used in development and in situations where the likes of
+:term:`PasteDeploy` is not necessary:
+
+.. 
code-block:: bash + + # Listen on both IPv4 and IPv6 on port 8041 + waitress-serve --listen=*:8041 myapp:wsgifunc + + # Listen on only IPv4 on port 8041 + waitress-serve --port=8041 myapp:wsgifunc + +For more information on this, see :ref:`runner`. diff -Nru waitress-1.1.0/HISTORY.txt waitress-1.2.0~b2/HISTORY.txt --- waitress-1.1.0/HISTORY.txt 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/HISTORY.txt 2019-01-02 20:46:09.000000000 +0000 @@ -1,3 +1,98 @@ +1.1.0 (2017-10-10) +------------------ + +Features +~~~~~~~~ + +- Waitress now has a __main__ and thus may be called with ``python -mwaitress`` + +Bugfixes +~~~~~~~~ + +- Waitress no longer allows lowercase HTTP verbs. This change was made to fall + in line with most HTTP servers. See https://github.com/Pylons/waitress/pull/170 + +- When receiving non-ascii bytes in the request URL, waitress will no longer + abruptly close the connection, instead returning a 400 Bad Request. See + https://github.com/Pylons/waitress/pull/162 and + https://github.com/Pylons/waitress/issues/64 + +1.0.2 (2017-02-04) +------------------ + +Features +~~~~~~~~ + +- Python 3.6 is now officially supported in Waitress + +Bugfixes +~~~~~~~~ + +- Add a work-around for libc issue on Linux not following the documented + standards. If getnameinfo() fails because of DNS not being available it + should return the IP address instead of the reverse DNS entry, however + instead getnameinfo() raises. We catch this, and ask getnameinfo() + for the same information again, explicitly asking for IP address instead of + reverse DNS hostname. See https://github.com/Pylons/waitress/issues/149 and + https://github.com/Pylons/waitress/pull/153 + +1.0.1 (2016-10-22) +------------------ + +Bugfixes +~~~~~~~~ + +- IPv6 support on Windows was broken due to missing constants in the socket + module. This has been resolved by setting the constants on Windows if they + are missing. See https://github.com/Pylons/waitress/issues/138 + +- A ValueError was raised on Windows when passing a string for the port, on + Windows in Python 2 using service names instead of port numbers doesn't work + with `getaddrinfo`. This has been resolved by attempting to convert the port + number to an integer, if that fails a ValueError will be raised. See + https://github.com/Pylons/waitress/issues/139 + + +1.0.0 (2016-08-31) +------------------ + +Bugfixes +~~~~~~~~ + +- Removed `AI_ADDRCONFIG` from the call to `getaddrinfo`, this resolves an + issue whereby `getaddrinfo` wouldn't return any addresses to `bind` to on + hosts where there is no internet connection but localhost is requested to be + bound to. See https://github.com/Pylons/waitress/issues/131 for more + information. + +Deprecations +~~~~~~~~~~~~ + +- Python 2.6 is no longer supported. + +Features +~~~~~~~~ + +- IPv6 support + +- Waitress is now able to listen on multiple sockets, including IPv4 and IPv6. + Instead of passing in a host/port combination you now provide waitress with a + space delineated list, and it will create as many sockets as required. + + .. code-block:: python + + from waitress import serve + serve(wsgiapp, listen='0.0.0.0:8080 [::]:9090 *:6543') + +Security +~~~~~~~~ + +- Waitress will now drop HTTP headers that contain an underscore in the key + when received from a client. This is to stop any possible underscore/dash + conflation that may lead to security issues. 
See + https://github.com/Pylons/waitress/pull/80 and + https://www.djangoproject.com/weblog/2015/jan/13/security/ + 0.9.0 (2016-04-15) ------------------ @@ -223,7 +318,7 @@ Bug Fixes ~~~~~~~~~ -- http://corte.si/posts/code/pathod/pythonservers/index.html pointed out that +- https://corte.si/posts/code/pathod/pythonservers/index.html pointed out that sending a bad header resulted in an exception leading to a 500 response instead of the more proper 400 response without an exception. @@ -254,7 +349,7 @@ ~~~~~~~~ - Support the WSGI ``wsgi.file_wrapper`` protocol as per - http://www.python.org/dev/peps/pep-0333/#optional-platform-specific-file-handling. + https://www.python.org/dev/peps/pep-0333/#optional-platform-specific-file-handling. Here's a usage example:: import os diff -Nru waitress-1.1.0/README.rst waitress-1.2.0~b2/README.rst --- waitress-1.1.0/README.rst 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/README.rst 2019-01-02 20:46:09.000000000 +0000 @@ -1,8 +1,26 @@ +Waitress +======== + +.. image:: https://img.shields.io/pypi/v/waitress.svg + :target: https://pypi.org/project/waitress/ + :alt: latest version of waitress on PyPI + +.. image:: https://travis-ci.org/Pylons/waitress.png?branch=master + :target: https://travis-ci.org/Pylons/waitress + :alt: Travis CI for waitress (master branch) + +.. image:: https://readthedocs.org/projects/waitress/badge/?version=master + :target: https://docs.pylonsproject.org/projects/waitress/en/master + :alt: master Documentation Status + +.. image:: https://img.shields.io/badge/irc-freenode-blue.svg + :target: https://webchat.freenode.net/?channels=pyramid + :alt: IRC Freenode + Waitress is meant to be a production-quality pure-Python WSGI server with very acceptable performance. It has no dependencies except ones which live in the Python standard library. It runs on CPython on Unix and Windows under Python -2.7+ and Python 3.3+. It is also known to run on PyPy 1.6.0+ on UNIX. It +2.7+ and Python 3.4+. It is also known to run on PyPy 1.6.0+ on UNIX. It supports HTTP/1.0 and HTTP/1.1. -For more information, see the "docs" directory of the Waitress package or -http://docs.pylonsproject.org/projects/waitress/en/latest/ . +For more information, see the "docs" directory of the Waitress package or visit https://docs.pylonsproject.org/projects/waitress/en/latest/ diff -Nru waitress-1.1.0/RELEASING.txt waitress-1.2.0~b2/RELEASING.txt --- waitress-1.1.0/RELEASING.txt 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/RELEASING.txt 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,115 @@ +Releasing +========= + +- For clarity, we define releases as follows. + + - Alpha, beta, dev and similar statuses do not qualify whether a release is + major or minor. The term "pre-release" means alpha, beta, or dev. + + - A release is final when it is no longer pre-release. + + - A *major* release is where the first number either before or after the + first dot increases. Examples: 1.0 to 1.1a1, or 0.9 to 1.0. + + - A *minor* or *bug fix* release is where the number after the second dot + increases. Example: 1.0 to 1.0.1. + +Prepare new release +------------------- + +- Do platform test via tox: + + $ tox -r + + Make sure statement coverage is at 100% (the test run will fail if not). + +- Run tests on Windows if feasible. + +- Ensure all features of the release are documented (audit CHANGES.txt or + communicate with contributors). + +- Change CHANGES.txt heading to reflect the new version number. 
+ +- Minor releases should include a link under "Bug Fix Releases" to the minor + feature changes in CHANGES.txt. + +- Change setup.py version to the release version number. + +- Make sure PyPI long description renders (requires ``readme_renderer`` + installed into your Python):: + + $ python setup.py check -r -s -m + +- Create a release tag. + +- Make sure your Python has ``setuptools-git``, ``twine``, and ``wheel`` + installed and release to PyPI:: + + $ python setup.py sdist bdist_wheel + $ twine upload dist/waitress-X.X-* + + +Prepare master for further development (major releases only) +------------------------------------------------------------ + +- In CHANGES.txt, preserve headings but clear out content. Add heading + "unreleased" for the version number. + +- Forward port the changes in CHANGES.txt to HISTORY.txt. + +- Change setup.py version to the next version number. + + +Marketing and communications +---------------------------- + +- Check `https://wiki.python.org/moin/WebServers + `_. + +- Announce to Twitter. + +``` +waitress 1.x released. + +PyPI +https://pypi.org/project/waitress/1.x/ + +=== One time only for new version, first pre-release === +What's New +https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 +=== For all subsequent pre-releases === +Changes +https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history + +Documentation: +https://docs.pylonsproject.org/projects/waitress/en/latest/ + +Issues +https://github.com/Pylons/waitress/issues +``` + +- Announce to maillist. + +``` +waitress 1.X.X has been released. + +The full changelog is here: +https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history + +What's New In waitress 1.X: +https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 + +Documentation: +https://docs.pylonsproject.org/projects/waitress/en/latest/ + +You can install it via PyPI: + + pip install waitress==1.X + +Enjoy, and please report any issues you find to the issue tracker at +https://github.com/Pylons/waitress/issues + +Thanks! 
+ +- waitress core developers +``` diff -Nru waitress-1.1.0/setup.py waitress-1.2.0~b2/setup.py --- waitress-1.1.0/setup.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/setup.py 2019-01-02 20:46:09.000000000 +0000 @@ -22,9 +22,9 @@ README = CHANGES = '' docs_extras = [ - 'Sphinx', + 'Sphinx>=1.8.1', 'docutils', - 'pylons-sphinx-themes >= 0.3', + 'pylons-sphinx-themes>=1.0.9', ] testing_extras = [ @@ -34,7 +34,7 @@ setup( name='waitress', - version='1.1.0', + version='1.2.0b2', author='Zope Foundation and Contributors', author_email='zope-dev@zope.org', maintainer="Pylons Project", @@ -52,15 +52,16 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', 'Natural Language :: English', 'Operating System :: OS Independent', 'Topic :: Internet :: WWW/HTTP', + 'Topic :: Internet :: WWW/HTTP :: WSGI', ], url='https://github.com/Pylons/waitress', packages=find_packages(), diff -Nru waitress-1.1.0/tox.ini waitress-1.2.0~b2/tox.ini --- waitress-1.1.0/tox.ini 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/tox.ini 2019-01-02 20:46:09.000000000 +0000 @@ -1,6 +1,6 @@ [tox] envlist = - py27,py33,py34,py35,py36,pypy, + py27,py34,py35,py36,py37,pypy, docs, {py2,py3}-cover,coverage @@ -9,10 +9,11 @@ # to defaults for others. 
basepython = py27: python2.7 - py33: python3.3 py34: python3.4 py35: python3.5 py36: python3.6 + py37: python3.7 + py38: python3.8 pypy: pypy py2: python2.7 py3: python3.5 diff -Nru waitress-1.1.0/.travis.yml waitress-1.2.0~b2/.travis.yml --- waitress-1.1.0/.travis.yml 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/.travis.yml 2019-01-02 20:46:09.000000000 +0000 @@ -6,24 +6,28 @@ include: - python: 2.7 env: TOXENV=py27 - - python: 3.3 - env: TOXENV=py33 - python: 3.4 env: TOXENV=py34 - python: 3.5 env: TOXENV=py35 - python: 3.6 env: TOXENV=py36 + - python: 3.7 + env: TOXENV=py37 + dist: xenial + sudo: true + - python: 3.8-dev + env: TOXENV=py38 + dist: xenial + sudo: true - python: pypy env: TOXENV=pypy - - python: pypy3 - env: TOXENV=pypy3 - python: 3.5 env: TOXENV=py2-cover,py3-cover,coverage - python: 3.5 env: TOXENV=docs allow_failures: - - env: TOXENV=pypy3 + - env: TOXENV=py38 install: - travis_retry pip install tox diff -Nru waitress-1.1.0/waitress/adjustments.py waitress-1.2.0~b2/waitress/adjustments.py --- waitress-1.1.0/waitress/adjustments.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/adjustments.py 2019-01-02 20:46:09.000000000 +0000 @@ -15,8 +15,10 @@ """ import getopt import socket +import warnings -from waitress.compat import ( +from .utilities import PROXY_HEADERS +from .compat import ( PY2, WIN, string_types, @@ -25,6 +27,8 @@ truthy = frozenset(('t', 'true', 'y', 'yes', 'on', '1')) +KNOWN_PROXY_HEADERS = {header.lower().replace('_', '-') for header in PROXY_HEADERS} + def asbool(s): """ Return the boolean value ``True`` if the case-lowered value of string input ``s`` is any of ``t``, ``true``, ``y``, ``on``, or ``1``, otherwise @@ -58,6 +62,9 @@ result.extend(subvalues) return result +def asset(value): + return set(aslist(value)) + def slash_fixed_str(s): s = s.strip() if s: @@ -66,12 +73,23 @@ s = '/' + s.lstrip('/').rstrip('/') return s +def str_iftruthy(s): + return str(s) if s else None + +def as_socket_list(sockets): + """Checks if the elements in the list are of type socket and + removes them if not.""" + return [sock for sock in sockets if isinstance(sock, socket.socket)] + class _str_marker(str): pass class _int_marker(int): pass +class _bool_marker(object): + pass + class Adjustments(object): """This class contains tunable parameters. """ @@ -83,7 +101,11 @@ ('ipv6', asbool), ('listen', aslist), ('threads', int), - ('trusted_proxy', str), + ('trusted_proxy', str_iftruthy), + ('trusted_proxy_count', int), + ('trusted_proxy_headers', asset), + ('log_untrusted_proxy_headers', asbool), + ('clear_untrusted_proxy_headers', asbool), ('url_scheme', str), ('url_prefix', slash_fixed_str), ('backlog', int), @@ -98,11 +120,12 @@ ('max_request_header_size', int), ('max_request_body_size', int), ('expose_tracebacks', asbool), - ('ident', str), + ('ident', str_iftruthy), ('asyncore_loop_timeout', int), ('asyncore_use_poll', asbool), ('unix_socket', str), ('unix_socket_perms', asoctal), + ('sockets', as_socket_list), ) _param_map = dict(_params) @@ -115,12 +138,41 @@ listen = ['{}:{}'.format(host, port)] - # mumber of threads available for tasks + # number of threads available for tasks threads = 4 # Host allowed to overrid ``wsgi.url_scheme`` via header trusted_proxy = None + # How many proxies we trust when chained + # + # X-Forwarded-For: 192.0.2.1, "[2001:db8::1]" + # + # or + # + # Forwarded: for=192.0.2.1, For="[2001:db8::1]" + # + # means there were (potentially), two proxies involved. 
If we know there is + # only 1 valid proxy, then that initial IP address "192.0.2.1" is not + # trusted and we completely ignore it. If there are two trusted proxies in + # the path, this value should be set to a higher number. + trusted_proxy_count = 1 + + # Which of the proxy headers should we trust, this is a set where you + # either specify forwarded or one or more of forwarded-host, forwarded-for, + # forwarded-proto, forwarded-port. + trusted_proxy_headers = set() + + # Would you like waitress to log warnings about untrusted proxy headers + # that were encountered while processing the proxy headers? This only makes + # sense to set when you have a trusted_proxy, and you expect the upstream + # proxy server to filter invalid headers + log_untrusted_proxy_headers = False + + # Should waitress clear any proxy headers that are not deemed trusted from + # the environ? Change to True by default in 2.x + clear_untrusted_proxy_headers = _bool_marker + # default ``wsgi.url_scheme`` value url_scheme = 'http' @@ -213,10 +265,29 @@ # Enable IPv6 by default ipv6 = True + # A list of sockets that waitress will use to accept connections. They can + # be used for e.g. socket activation + sockets = [] + def __init__(self, **kw): if 'listen' in kw and ('host' in kw or 'port' in kw): - raise ValueError('host and or port may not be set if listen is set.') + raise ValueError('host or port may not be set if listen is set.') + + if 'listen' in kw and 'sockets' in kw: + raise ValueError('socket may not be set if listen is set.') + + if 'sockets' in kw and ('host' in kw or 'port' in kw): + raise ValueError('host or port may not be set if sockets is set.') + + if 'sockets' in kw and 'unix_socket' in kw: + raise ValueError('unix_socket may not be set if sockets is set') + + if 'unix_socket' in kw and ('host' in kw or 'port' in kw): + raise ValueError('unix_socket may not be set if host or port is set') + + if 'unix_socket' in kw and 'listen' in kw: + raise ValueError('unix_socket may not be set if listen is set') for k, v in kw.items(): if k not in self._param_map: @@ -296,8 +367,60 @@ except: raise ValueError('Invalid host/port specified.') + if ( + self.trusted_proxy is None and + ( + self.trusted_proxy_headers or + (self.clear_untrusted_proxy_headers is not _bool_marker) + ) + ): + raise ValueError( + "The values trusted_proxy_headers and clear_untrusted_proxy_headers " + "have no meaning without setting trusted_proxy. Cowardly refusing to " + "continue." + ) + + if self.trusted_proxy_headers: + self.trusted_proxy_headers = {header.lower() for header in self.trusted_proxy_headers} + + unknown_values = self.trusted_proxy_headers - KNOWN_PROXY_HEADERS + if unknown_values: + raise ValueError( + "Received unknown trusted_proxy_headers value (%s) expected one " + "of %s" % (", ".join(unknown_values), ", ".join(KNOWN_PROXY_HEADERS)) + ) + + if ( + 'forwarded' in self.trusted_proxy_headers and + self.trusted_proxy_headers - {'forwarded'} + ): + raise ValueError( + "The Forwarded proxy header and the " + "X-Forwarded-{By,Host,Proto,Port,For} headers are mutually " + "exclusive. Can't trust both!" + ) + elif self.trusted_proxy is not None: + warnings.warn( + 'No proxy headers were marked as trusted, but trusted_proxy was set. ' + 'Implicitly trusting X-Forwarded-Proto for backwards compatibility. 
' + 'This will be removed in future versions of waitress.', + DeprecationWarning + ) + self.trusted_proxy_headers = {'x-forwarded-proto'} + + if self.trusted_proxy and self.clear_untrusted_proxy_headers is _bool_marker: + warnings.warn( + 'In future versions of Waitress clear_untrusted_proxy_headers will be ' + 'set to True by default. You may opt-out by setting this value to ' + 'False, or opt-in explicitly by setting this to True.', + DeprecationWarning + ) + self.clear_untrusted_proxy_headers = False + self.listen = wanted_sockets + self.check_sockets(self.sockets) + @classmethod def parse_args(cls, argv): """Pre-parse command line arguments for input into __init__. Note that @@ -338,3 +461,23 @@ kw[param] = value return kw, args + + @classmethod + def check_sockets(cls, sockets): + has_unix_socket = False + has_inet_socket = False + has_unsupported_socket = False + for sock in sockets: + if (sock.family == socket.AF_INET or sock.family == socket.AF_INET6) and \ + sock.type == socket.SOCK_STREAM: + has_inet_socket = True + elif hasattr(socket, 'AF_UNIX') and \ + sock.family == socket.AF_UNIX and \ + sock.type == socket.SOCK_STREAM: + has_unix_socket = True + else: + has_unsupported_socket = True + if has_unix_socket and has_inet_socket: + raise ValueError('Internet and UNIX sockets may not be mixed.') + if has_unsupported_socket: + raise ValueError('Only Internet or UNIX stream sockets may be used.') diff -Nru waitress-1.1.0/waitress/channel.py waitress-1.2.0~b2/waitress/channel.py --- waitress-1.1.0/waitress/channel.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/channel.py 2019-01-02 20:46:09.000000000 +0000 @@ -11,7 +11,6 @@ # FOR A PARTICULAR PURPOSE. # ############################################################################## -import asyncore import socket import threading import time @@ -29,12 +28,11 @@ WSGITask, ) -from waitress.utilities import ( - logging_dispatcher, - InternalServerError, -) +from waitress.utilities import InternalServerError + +from . import wasyncore -class HTTPChannel(logging_dispatcher, object): +class HTTPChannel(wasyncore.dispatcher, object): """ Setting self.requests = [somerequest] prevents more requests from being received until the out buffers have been flushed. @@ -76,9 +74,9 @@ # outbuf_lock used to access any outbuf self.outbuf_lock = threading.Lock() - asyncore.dispatcher.__init__(self, sock, map=map) + wasyncore.dispatcher.__init__(self, sock, map=map) - # Don't let asyncore.dispatcher throttle self.addr on us. + # Don't let wasyncore.dispatcher throttle self.addr on us. self.addr = addr def any_outbuf_has_data(self): @@ -281,23 +279,23 @@ self.logger.exception( 'Unknown exception while trying to close outbuf') self.connected = False - asyncore.dispatcher.close(self) + wasyncore.dispatcher.close(self) def add_channel(self, map=None): - """See asyncore.dispatcher + """See wasyncore.dispatcher This hook keeps track of opened channels. """ - asyncore.dispatcher.add_channel(self, map) + wasyncore.dispatcher.add_channel(self, map) self.server.active_channels[self._fileno] = self def del_channel(self, map=None): - """See asyncore.dispatcher + """See wasyncore.dispatcher This hook keeps track of closed channels. 
""" fd = self._fileno # next line sets this to None - asyncore.dispatcher.del_channel(self, map) + wasyncore.dispatcher.del_channel(self, map) ac = self.server.active_channels if fd in ac: del ac[fd] diff -Nru waitress-1.1.0/waitress/compat.py waitress-1.2.0~b2/waitress/compat.py --- waitress-1.1.0/waitress/compat.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/compat.py 2019-01-02 20:46:09.000000000 +0000 @@ -1,3 +1,4 @@ +import os import sys import types import platform @@ -8,6 +9,11 @@ except ImportError: # pragma: no cover from urllib import parse as urlparse +try: + import fcntl +except ImportError: # pragma: no cover + fcntl = None # windows + # True if we are running on Python 3. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 @@ -138,3 +144,30 @@ RuntimeWarning ) HAS_IPV6 = False + +def set_nonblocking(fd): # pragma: no cover + if PY3 and sys.version_info[1] >= 5: + os.set_blocking(fd, False) + elif fcntl is None: + raise RuntimeError('no fcntl module present') + else: + flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0) + flags = flags | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + +if PY3: + ResourceWarning = ResourceWarning +else: + ResourceWarning = UserWarning + +def qualname(cls): + if PY3: + return cls.__qualname__ + return cls.__name__ + +try: + import thread +except ImportError: + # py3 + import _thread as thread + diff -Nru waitress-1.1.0/waitress/parser.py waitress-1.2.0~b2/waitress/parser.py --- waitress-1.1.0/waitress/parser.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/parser.py 2019-01-02 20:46:09.000000000 +0000 @@ -274,7 +274,7 @@ for line in lines: if line.startswith((b' ', b'\t')): if not r: - # http://corte.si/posts/code/pathod/pythonservers/index.html + # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: diff -Nru waitress-1.1.0/waitress/server.py waitress-1.2.0~b2/waitress/server.py --- waitress-1.1.0/waitress/server.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/server.py 2019-01-02 20:46:09.000000000 +0000 @@ -12,7 +12,6 @@ # ############################################################################## -import asyncore import os import os.path import socket @@ -22,14 +21,13 @@ from waitress.adjustments import Adjustments from waitress.channel import HTTPChannel from waitress.task import ThreadedTaskDispatcher -from waitress.utilities import ( - cleanup_unix_socket, - logging_dispatcher, - ) +from waitress.utilities import cleanup_unix_socket + from waitress.compat import ( IPPROTO_IPV6, IPV6_V6ONLY, ) +from . import wasyncore def create_server(application, map=None, @@ -71,23 +69,49 @@ effective_listen = [] last_serv = None - for sockinfo in adj.listen: - # When TcpWSGIServer is called, it registers itself in the map. This - # side-effect is all we need it for, so we don't store a reference to - # or return it to the user. - last_serv = TcpWSGIServer( - application, - map, - _start, - _sock, - dispatcher=dispatcher, - adj=adj, - sockinfo=sockinfo) - effective_listen.append((last_serv.effective_host, last_serv.effective_port)) + if not adj.sockets: + for sockinfo in adj.listen: + # When TcpWSGIServer is called, it registers itself in the map. This + # side-effect is all we need it for, so we don't store a reference to + # or return it to the user. 
+ last_serv = TcpWSGIServer( + application, + map, + _start, + _sock, + dispatcher=dispatcher, + adj=adj, + sockinfo=sockinfo) + effective_listen.append((last_serv.effective_host, last_serv.effective_port)) + + for sock in adj.sockets: + sockinfo = (sock.family, sock.type, sock.proto, sock.getsockname()) + if sock.family == socket.AF_INET or sock.family == socket.AF_INET6: + last_serv = TcpWSGIServer( + application, + map, + _start, + sock, + dispatcher=dispatcher, + adj=adj, + bind_socket=False, + sockinfo=sockinfo) + effective_listen.append((last_serv.effective_host, last_serv.effective_port)) + elif hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX: + last_serv = UnixWSGIServer( + application, + map, + _start, + sock, + dispatcher=dispatcher, + adj=adj, + bind_socket=False, + sockinfo=sockinfo) + effective_listen.append((last_serv.effective_host, last_serv.effective_port)) # We are running a single server, so we can just return the last server, # saves us from having to create one more object - if len(adj.listen) == 1: + if len(effective_listen) == 1: # In this case we have no need to use a MultiSocketServer return last_serv @@ -98,10 +122,10 @@ # This class is only ever used if we have multiple listen sockets. It allows -# the serve() API to call .run() which starts the asyncore loop, and catches +# the serve() API to call .run() which starts the wasyncore loop, and catches # SystemExit/KeyboardInterrupt so that it can atempt to cleanly shut down. class MultiSocketServer(object): - asyncore = asyncore # test shim + asyncore = wasyncore # test shim def __init__(self, map=None, @@ -131,15 +155,19 @@ use_poll=self.adj.asyncore_use_poll, ) except (SystemExit, KeyboardInterrupt): - self.task_dispatcher.shutdown() + self.close() + + def close(self): + self.task_dispatcher.shutdown() + wasyncore.close_all(self.map) -class BaseWSGIServer(logging_dispatcher, object): +class BaseWSGIServer(wasyncore.dispatcher, object): channel_class = HTTPChannel next_channel_cleanup = 0 socketmod = socket # test shim - asyncore = asyncore # test shim + asyncore = wasyncore # test shim def __init__(self, application, @@ -149,13 +177,14 @@ dispatcher=None, # dispatcher adj=None, # adjustments sockinfo=None, # opaque object + bind_socket=True, **kw ): if adj is None: adj = Adjustments(**kw) if map is None: # use a nonglobal socket map by default to hopefully prevent - # conflicts with apps and libs that use the asyncore global socket + # conflicts with apps and libs that use the wasyncore global socket # map ala https://github.com/Pylons/waitress/issues/63 map = {} if sockinfo is None: @@ -179,7 +208,10 @@ self.socket.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) self.set_reuse_addr() - self.bind_server_socket() + + if bind_socket: + self.bind_server_socket() + self.effective_host, self.effective_port = self.getsockname() self.server_name = self.get_server_name(self.effective_host) self.active_channels = {} @@ -191,21 +223,35 @@ def get_server_name(self, ip): """Given an IP or hostname, try to determine the server name.""" - if ip: - server_name = str(ip) - else: - server_name = str(self.socketmod.gethostname()) - - # Convert to a host name if necessary. - for c in server_name: - if c != '.' 
and not c.isdigit(): - return server_name - try: - if server_name == '0.0.0.0' or server_name == '::': + + if not ip: + raise ValueError('Requires an IP to get the server name') + + server_name = str(ip) + + # If we are bound to all IP's, just return the current hostname, only + # fall-back to "localhost" if we fail to get the hostname + if server_name == '0.0.0.0' or server_name == '::': + try: + return str(self.socketmod.gethostname()) + except (socket.error, UnicodeDecodeError): # pragma: no cover + # We also deal with UnicodeDecodeError in case of Windows with + # non-ascii hostname return 'localhost' + + # Now let's try and convert the IP address to a proper hostname + try: server_name = self.socketmod.gethostbyaddr(server_name)[0] - except socket.error: # pragma: no cover + except (socket.error, UnicodeDecodeError): # pragma: no cover + # We also deal with UnicodeDecodeError in case of Windows with + # non-ascii hostname pass + + # If it contains an IPv6 literal, make sure to surround it with + # brackets + if ':' in server_name and '[' not in server_name: + server_name = '[{}]'.format(server_name) + return server_name def getsockname(self): @@ -286,6 +332,10 @@ def print_listen(self, format_str): # pragma: nocover print(format_str.format(self.effective_host, self.effective_port)) + def close(self): + self.trigger.close() + return wasyncore.dispatcher.close(self) + class TcpWSGIServer(BaseWSGIServer): @@ -353,5 +403,8 @@ def fix_addr(self, addr): return ('localhost', None) + def get_server_name(self, ip): + return 'localhost' + # Compatibility alias. WSGIServer = TcpWSGIServer diff -Nru waitress-1.1.0/waitress/task.py waitress-1.2.0~b2/waitress/task.py --- waitress-1.1.0/waitress/task.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/task.py 2019-01-02 20:46:09.000000000 +0000 @@ -17,18 +17,16 @@ import threading import time -from waitress.buffers import ReadOnlyFileBasedBuffer - -from waitress.compat import ( - tobytes, - Queue, - Empty, - reraise, -) - -from waitress.utilities import ( +from .buffers import ReadOnlyFileBasedBuffer +from .compat import Empty, Queue, reraise, tobytes +from .utilities import ( + Forwarded, + PROXY_HEADERS, build_http_date, + clear_untrusted_headers, logger, + queue_logger, + undquote, ) rename_headers = { # or keep them without the HTTP_ prefix added @@ -47,6 +45,7 @@ 'upgrade' )) + class JustTesting(Exception): pass @@ -55,6 +54,7 @@ """ stop_count = 0 # Number of threads that will stop soon. logger = logger + queue_logger = queue_logger def __init__(self): self.threads = {} # { thread number -> 1 } @@ -108,6 +108,11 @@ running -= 1 def add_task(self, task): + queue_depth = self.queue.qsize() + if queue_depth > 0: + self.queue_logger.warning( + "Task queue depth is %d" % + queue_depth) try: task.defer() self.queue.put(task) @@ -148,6 +153,7 @@ content_length = None content_bytes_written = 0 logged_write_excess = False + logged_write_no_body = False complete = False chunked_response = False logger = logger @@ -175,6 +181,13 @@ finally: pass + @property + def has_body(self): + return not (self.status.startswith('1') or + self.status.startswith('204') or + self.status.startswith('304') + ) + def cancel(self): self.close_on_finish = True @@ -185,30 +198,41 @@ version = self.version # Figure out whether the connection should be closed. 
connection = self.request.headers.get('CONNECTION', '').lower() - response_headers = self.response_headers + response_headers = [] content_length_header = None date_header = None server_header = None connection_close_header = None - for i, (headername, headerval) in enumerate(response_headers): + for (headername, headerval) in self.response_headers: headername = '-'.join( [x.capitalize() for x in headername.split('-')] ) + if headername == 'Content-Length': - content_length_header = headerval + if self.has_body: + content_length_header = headerval + else: + continue # pragma: no cover + if headername == 'Date': date_header = headerval + if headername == 'Server': server_header = headerval + if headername == 'Connection': connection_close_header = headerval.lower() # replace with properly capitalized version - response_headers[i] = (headername, headerval) + response_headers.append((headername, headerval)) - if content_length_header is None and self.content_length is not None: + if ( + content_length_header is None and + self.content_length is not None and + self.has_body + ): content_length_header = str(self.content_length) - self.response_headers.append( + response_headers.append( ('Content-Length', content_length_header) ) @@ -231,8 +255,13 @@ close_on_finish() if not content_length_header: - response_headers.append(('Transfer-Encoding', 'chunked')) - self.chunked_response = True + # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length + # for any response with a status code of 1xx, 204 or 304. + + if self.has_body: + response_headers.append(('Transfer-Encoding', 'chunked')) + self.chunked_response = True + if not self.close_on_finish: close_on_finish() @@ -243,27 +272,38 @@ # Set the Server and Date field, if not yet specified. This is needed # if the server is used as a proxy. 
ident = self.channel.server.adj.ident + if not server_header: - response_headers.append(('Server', ident)) + if ident: + response_headers.append(('Server', ident)) else: - response_headers.append(('Via', ident)) + response_headers.append(('Via', ident or 'waitress')) + if not date_header: response_headers.append(('Date', build_http_date(self.start_time))) + self.response_headers = response_headers + first_line = 'HTTP/%s %s' % (self.version, self.status) # NB: sorting headers needs to preserve same-named-header order # as per RFC 2616 section 4.2; thus the key=lambda x: x[0] here; # rely on stable sort to keep relative position of same-named headers next_lines = ['%s: %s' % hv for hv in sorted( - self.response_headers, key=lambda x: x[0])] + self.response_headers, key=lambda x: x[0])] lines = [first_line] + next_lines res = '%s\r\n\r\n' % '\r\n'.join(lines) + return tobytes(res) def remove_content_length_header(self): - for i, (header_name, header_value) in enumerate(self.response_headers): + response_headers = [] + + for header_name, header_value in self.response_headers: if header_name.lower() == 'content-length': - del self.response_headers[i] + continue # pragma: nocover + response_headers.append((header_name, header_value)) + + self.response_headers = response_headers def start(self): self.start_time = time.time() @@ -284,7 +324,8 @@ rh = self.build_response_header() channel.write_soon(rh) self.wrote_header = True - if data: + + if data and self.has_body: towrite = data cl = self.content_length if self.chunked_response: @@ -301,6 +342,18 @@ self.logged_write_excess = True if towrite: channel.write_soon(towrite) + elif data: + # Cheat, and tell the application we have written all of the bytes, + # even though the response shouldn't have a body and we are + # ignoring it entirely. + self.content_bytes_written += len(data) + + if not self.logged_write_no_body: + self.logger.warning( + 'application-written content was ignored due to HTTP ' + 'response that may not contain a message-body: (%s)' % self.status) + self.logged_write_no_body = True + class ErrorTask(Task): """ An error task produces an error response @@ -453,6 +506,229 @@ if hasattr(app_iter, 'close'): app_iter.close() + def parse_proxy_headers( + self, + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers=None, + ): + if trusted_proxy_headers is None: + trusted_proxy_headers = set() + + forwarded_for = [] + forwarded_host = forwarded_proto = forwarded_port = forwarded = "" + client_addr = None + untrusted_headers = set(PROXY_HEADERS) + + def warn_unspecified_behavior(header): + self.logger.warning( + "Found multiple values in %s, this has unspecified behaviour. 
" + "Ignoring header value.", + header, + ) + + if "x-forwarded-for" in trusted_proxy_headers and "X_FORWARDED_FOR" in headers: + forwarded_for = [] + + for forward_hop in headers["X_FORWARDED_FOR"].split(","): + forward_hop = forward_hop.strip() + forward_hop = undquote(forward_hop) + + # Make sure that all IPv6 addresses are surrounded by brackets + + if ":" in forward_hop and forward_hop[-1] != "]": + forwarded_for.append("[{}]".format(forward_hop)) + else: + forwarded_for.append(forward_hop) + + forwarded_for = forwarded_for[-trusted_proxy_count:] + client_addr = forwarded_for[0] + + untrusted_headers.remove("X_FORWARDED_FOR") + + if "x-forwarded-host" in trusted_proxy_headers and "X_FORWARDED_HOST" in headers: + forwarded_host_multiple = [] + + for forward_host in headers["X_FORWARDED_HOST"].split(","): + forward_host = forward_host.strip() + forward_host = undquote(forward_host) + forwarded_host_multiple.append(forward_host) + + forwarded_host_multiple = forwarded_host_multiple[-trusted_proxy_count:] + forwarded_host = forwarded_host_multiple[0] + + untrusted_headers.remove("X_FORWARDED_HOST") + + if "x-forwarded-proto" in trusted_proxy_headers: + forwarded_proto = undquote(headers.get("X_FORWARDED_PROTO", "")) + untrusted_headers.remove("X_FORWARDED_PROTO") + + if "," in forwarded_proto: + forwarded_proto = "" + warn_unspecified_behavior("X-Forwarded-Proto") + + if "x-forwarded-port" in trusted_proxy_headers: + forwarded_port = undquote(headers.get("X_FORWARDED_PORT", "")) + untrusted_headers.remove("X_FORWARDED_PORT") + + if "," in forwarded_port: + forwarded_port = "" + warn_unspecified_behavior("X-Forwarded-Port") + + if "x-forwarded-by" in trusted_proxy_headers: + # Waitress itself does not use X-Forwarded-By, but we can not + # remove it so it can get set in the environ + untrusted_headers.remove("X_FORWARDED_BY") + + if "forwarded" in trusted_proxy_headers: + forwarded = headers.get("FORWARDED", None) + untrusted_headers = PROXY_HEADERS - {"FORWARDED"} + + # If the Forwarded header exists, it gets priority + if forwarded: + proxies = [] + + for forwarded_element in forwarded.split(","): + # Remove whitespace that may have been introduced when + # appending a new entry + forwarded_element = forwarded_element.strip() + + forwarded_for = forwarded_host = forwarded_proto = "" + forwarded_port = forwarded_by = "" + + for pair in forwarded_element.split(";"): + pair = pair.lower() + + if not pair: + continue + + token, equals, value = pair.partition("=") + + if equals != "=": + raise ValueError("Invalid forwarded-pair in Forwarded element") + + if token.strip() != token: + raise ValueError("token may not be surrounded by whitespace") + + if value.strip() != value: + raise ValueError("value may not be surrounded by whitespace") + + if token == "by": + forwarded_by = undquote(value) + + elif token == "for": + forwarded_for = undquote(value) + + elif token == "host": + forwarded_host = undquote(value) + + elif token == "proto": + forwarded_proto = undquote(value) + + else: + self.logger.warning("Unknown Forwarded token: %s" % token) + + proxies.append( + Forwarded( + forwarded_by, forwarded_for, forwarded_host, forwarded_proto + ) + ) + + proxies = proxies[-trusted_proxy_count:] + + # Iterate backwards and fill in some values, the oldest entry that + # contains the information we expect is the one we use. 
We expect + # that intermediate proxies may re-write the host header or proto, + # but the oldest entry is the one that contains the information the + # client expects when generating URL's + # + # Forwarded: for="[2001:db8::1]";host="example.com:8443";proto="https" + # Forwarded: for=192.0.2.1;host="example.internal:8080" + # + # (After HTTPS header folding) should mean that we use as values: + # + # Host: example.com + # Protocol: https + # Port: 8443 + + for proxy in proxies[::-1]: + client_addr = proxy.for_ or client_addr + forwarded_host = proxy.host or forwarded_host + forwarded_proto = proxy.proto or forwarded_proto + + if forwarded_proto: + forwarded_proto = forwarded_proto.lower() + + if forwarded_proto not in {"http", "https"}: + raise ValueError( + 'Invalid "Forwarded Proto=" or "X-Forwarded-Proto" value.' + ) + + # Set the URL scheme to the proxy provided proto + environ["wsgi.url_scheme"] = forwarded_proto + + if not forwarded_port: + if forwarded_proto == "http": + forwarded_port = "80" + + if forwarded_proto == "https": + forwarded_port = "443" + + if forwarded_host: + forwarded_host = forwarded_host.strip() + + if ":" in forwarded_host and forwarded_host[-1] != "]": + host, port = forwarded_host.rsplit(":", 1) + host, port = host.strip(), str(port) + + # We trust the port in the Forwarded Host/X-Forwarded-Host over + # X-Forwarded-Port, or whatever we got from Forwarded + # Proto/X-Forwarded-Proto. + + if forwarded_port != port: + forwarded_port = port + + # We trust the proxy server's forwarded Host + environ["SERVER_NAME"] = host + environ["HTTP_HOST"] = forwarded_host + else: + # We trust the proxy server's forwarded Host + environ["SERVER_NAME"] = forwarded_host + environ["HTTP_HOST"] = forwarded_host + + if forwarded_port: + if forwarded_port not in {"443", "80"}: + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + elif ( + forwarded_port == "80" and environ["wsgi.url_scheme"] != "http" + ): + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + elif ( + forwarded_port == "443" + and environ["wsgi.url_scheme"] != "https" + ): + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + + if forwarded_port: + environ["SERVER_PORT"] = str(forwarded_port) + + if client_addr: + if ":" in client_addr and client_addr[-1] != "]": + addr, port = client_addr.rsplit(":", 1) + environ["REMOTE_ADDR"] = addr.strip() + environ["REMOTE_PORT"] = port.strip() + else: + environ["REMOTE_ADDR"] = client_addr.strip() + + return untrusted_headers + def get_environment(self): """Returns a WSGI environment.""" environ = self.environ @@ -487,25 +763,61 @@ if path.startswith(url_prefix_with_trailing_slash): path = path[len(url_prefix):] - environ = {} - environ['REQUEST_METHOD'] = request.command.upper() - environ['SERVER_PORT'] = str(server.effective_port) - environ['SERVER_NAME'] = server.server_name - environ['SERVER_SOFTWARE'] = server.adj.ident - environ['SERVER_PROTOCOL'] = 'HTTP/%s' % self.version - environ['SCRIPT_NAME'] = url_prefix - environ['PATH_INFO'] = path - environ['QUERY_STRING'] = request.query - host = environ['REMOTE_ADDR'] = channel.addr[0] + environ = { + 'REQUEST_METHOD': request.command.upper(), + 'SERVER_PORT': str(server.effective_port), + 'SERVER_NAME': server.server_name, + 'SERVER_SOFTWARE': server.adj.ident, + 'SERVER_PROTOCOL': 'HTTP/%s' % self.version, + 'SCRIPT_NAME': url_prefix, + 'PATH_INFO': path, + 'QUERY_STRING': request.query, + 'wsgi.url_scheme': request.url_scheme, + + # the 
following environment variables are required by the WSGI spec + 'wsgi.version': (1, 0), + + # apps should use the logging module + 'wsgi.errors': sys.stderr, + 'wsgi.multithread': True, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'wsgi.input': request.get_body_stream(), + 'wsgi.file_wrapper': ReadOnlyFileBasedBuffer, + 'wsgi.input_terminated': True, # wsgi.input is EOF terminated + } + remote_peer = environ['REMOTE_ADDR'] = channel.addr[0] headers = dict(request.headers) - if host == server.adj.trusted_proxy: - wsgi_url_scheme = headers.pop('X_FORWARDED_PROTO', - request.url_scheme) + + untrusted_headers = PROXY_HEADERS + if server.adj.trusted_proxy == '*' or remote_peer == server.adj.trusted_proxy: + untrusted_headers = self.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=server.adj.trusted_proxy_count, + trusted_proxy_headers=server.adj.trusted_proxy_headers, + ) else: - wsgi_url_scheme = request.url_scheme - if wsgi_url_scheme not in ('http', 'https'): - raise ValueError('Invalid X_FORWARDED_PROTO value') + # If we are not relying on a proxy, we still want to try and set + # the REMOTE_PORT to something useful, maybe None though. + environ["REMOTE_PORT"] = str(channel.addr[1]) + + # Nah, we aren't actually going to look up the reverse DNS for + # REMOTE_ADDR, but we will happily set this environment variable for + # the WSGI application. Spec says we can just set this to REMOTE_ADDR, + # so we do. + environ["REMOTE_HOST"] = environ["REMOTE_ADDR"] + + # Clear out the untrusted proxy headers + if server.adj.clear_untrusted_proxy_headers: + clear_untrusted_headers( + headers, + untrusted_headers, + log_warning=server.adj.log_untrusted_proxy_headers, + logger=self.logger, + ) + for key, value in headers.items(): value = value.strip() mykey = rename_headers.get(key, None) @@ -514,15 +826,6 @@ if mykey not in environ: environ[mykey] = value - # the following environment variables are required by the WSGI spec - environ['wsgi.version'] = (1, 0) - environ['wsgi.url_scheme'] = wsgi_url_scheme - environ['wsgi.errors'] = sys.stderr # apps should use the logging module - environ['wsgi.multithread'] = True - environ['wsgi.multiprocess'] = False - environ['wsgi.run_once'] = False - environ['wsgi.input'] = request.get_body_stream() - environ['wsgi.file_wrapper'] = ReadOnlyFileBasedBuffer - + # cache the environ for this request self.environ = environ return environ diff -Nru waitress-1.1.0/waitress/tests/test_adjustments.py waitress-1.2.0~b2/waitress/tests/test_adjustments.py --- waitress-1.1.0/waitress/tests/test_adjustments.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_adjustments.py 2019-01-02 20:46:09.000000000 +0000 @@ -1,5 +1,6 @@ import sys import socket +import warnings from waitress.compat import ( PY2, @@ -49,6 +50,31 @@ result = self._callFUT(1) self.assertEqual(result, True) +class Test_as_socket_list(unittest.TestCase): + + def test_only_sockets_in_list(self): + from waitress.adjustments import as_socket_list + sockets = [ + socket.socket(socket.AF_INET, socket.SOCK_STREAM), + socket.socket(socket.AF_INET6, socket.SOCK_STREAM)] + if hasattr(socket, 'AF_UNIX'): + sockets.append(socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)) + new_sockets = as_socket_list(sockets) + self.assertEqual(sockets, new_sockets) + for sock in sockets: + sock.close() + + def test_not_only_sockets_in_list(self): + from waitress.adjustments import as_socket_list + sockets = [ + socket.socket(socket.AF_INET, socket.SOCK_STREAM), + 
socket.socket(socket.AF_INET6, socket.SOCK_STREAM), + {'something': 'else'}] + new_sockets = as_socket_list(sockets) + self.assertEqual(new_sockets, [sockets[0], sockets[1]]) + for sock in [sock for sock in sockets if isinstance(sock, socket.socket)]: + sock.close() + class TestAdjustments(unittest.TestCase): def _hasIPv6(self): # pragma: nocover @@ -83,6 +109,9 @@ port='8080', threads='5', trusted_proxy='192.168.1.1', + trusted_proxy_headers={'forwarded'}, + trusted_proxy_count=2, + log_untrusted_proxy_headers=True, url_scheme='https', backlog='20', recv_bytes='200', @@ -99,7 +128,6 @@ ident='abc', asyncore_loop_timeout='5', asyncore_use_poll=True, - unix_socket='/tmp/waitress.sock', unix_socket_perms='777', url_prefix='///foo/', ipv4=True, @@ -110,6 +138,9 @@ self.assertEqual(inst.port, 8080) self.assertEqual(inst.threads, 5) self.assertEqual(inst.trusted_proxy, '192.168.1.1') + self.assertEqual(inst.trusted_proxy_headers, {'forwarded'}) + self.assertEqual(inst.trusted_proxy_count, 2) + self.assertEqual(inst.log_untrusted_proxy_headers, True) self.assertEqual(inst.url_scheme, 'https') self.assertEqual(inst.backlog, 20) self.assertEqual(inst.recv_bytes, 200) @@ -126,7 +157,6 @@ self.assertEqual(inst.asyncore_loop_timeout, 5) self.assertEqual(inst.asyncore_use_poll, True) self.assertEqual(inst.ident, 'abc') - self.assertEqual(inst.unix_socket, '/tmp/waitress.sock') self.assertEqual(inst.unix_socket_perms, 0o777) self.assertEqual(inst.url_prefix, '/foo') self.assertEqual(inst.ipv4, True) @@ -210,6 +240,118 @@ listen='127.0.0.1:8080', ) + def test_good_sockets(self): + sockets = [ + socket.socket(socket.AF_INET6, socket.SOCK_STREAM), + socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + inst = self._makeOne(sockets=sockets) + self.assertEqual(inst.sockets, sockets) + sockets[0].close() + sockets[1].close() + + def test_dont_mix_sockets_and_listen(self): + sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + self.assertRaises( + ValueError, + self._makeOne, + listen='127.0.0.1:8080', + sockets=sockets) + sockets[0].close() + + def test_dont_mix_sockets_and_host_port(self): + sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + self.assertRaises( + ValueError, + self._makeOne, + host='localhost', + port='8080', + sockets=sockets) + sockets[0].close() + + def test_dont_mix_sockets_and_unix_socket(self): + sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + self.assertRaises( + ValueError, + self._makeOne, + unix_socket='./tmp/test', + sockets=sockets) + sockets[0].close() + + def test_dont_mix_unix_socket_and_host_port(self): + self.assertRaises( + ValueError, + self._makeOne, + unix_socket='./tmp/test', + host='localhost', + port='8080') + + def test_dont_mix_unix_socket_and_listen(self): + self.assertRaises( + ValueError, + self._makeOne, + unix_socket='./tmp/test', + listen='127.0.0.1:8080') + + def test_dont_use_unsupported_socket_types(self): + sockets = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] + self.assertRaises( + ValueError, + self._makeOne, + sockets=sockets) + sockets[0].close() + + def test_dont_mix_forwarded_with_x_forwarded(self): + with self.assertRaises(ValueError) as cm: + self._makeOne(trusted_proxy='localhost', trusted_proxy_headers={'forwarded', 'x-forwarded-for'}) + + self.assertIn('The Forwarded proxy header', str(cm.exception)) + + def test_unknown_trusted_proxy_header(self): + with self.assertRaises(ValueError) as cm: + self._makeOne(trusted_proxy='localhost', trusted_proxy_headers={'forwarded', 'x-forwarded-unknown'}) + + 
self.assertIn( + 'unknown trusted_proxy_headers value (x-forwarded-unknown)', + str(cm.exception) + ) + + def test_trusted_proxy_headers_no_trusted_proxy(self): + with self.assertRaises(ValueError) as cm: + self._makeOne(trusted_proxy_headers={'forwarded'}) + + self.assertIn( + 'Cowardly refusing to continue.', + str(cm.exception) + ) + + def test_trusted_proxy_headers_string_list(self): + inst = self._makeOne(trusted_proxy='localhost', trusted_proxy_headers='x-forwarded-for x-forwarded-by') + self.assertEqual(inst.trusted_proxy_headers, {'x-forwarded-for', 'x-forwarded-by'}) + + def test_trusted_proxy_headers_string_list_newlines(self): + inst = self._makeOne(trusted_proxy='localhost', trusted_proxy_headers='x-forwarded-for\nx-forwarded-by\nx-forwarded-host') + self.assertEqual(inst.trusted_proxy_headers, {'x-forwarded-for', 'x-forwarded-by', 'x-forwarded-host'}) + + def test_no_trusted_proxy_headers_trusted_proxy(self): + with warnings.catch_warnings(record=True) as w: + warnings.resetwarnings() + warnings.simplefilter("always") + self._makeOne(trusted_proxy='localhost') + + self.assertGreaterEqual(len(w), 1) + self.assertTrue(issubclass(w[0].category, DeprecationWarning)) + self.assertIn("Implicitly trusting X-Forwarded-Proto", str(w[0])) + + def test_clear_untrusted_proxy_headers(self): + with warnings.catch_warnings(record=True) as w: + warnings.resetwarnings() + warnings.simplefilter("always") + self._makeOne(trusted_proxy='localhost', trusted_proxy_headers={'x-forwarded-for'}) + + self.assertGreaterEqual(len(w), 1) + self.assertTrue(issubclass(w[0].category, DeprecationWarning)) + self.assertIn("clear_untrusted_proxy_headers will be set to True", str(w[0])) + def test_badvar(self): self.assertRaises(ValueError, self._makeOne, nope=True) @@ -219,6 +361,17 @@ def test_ipv6_disabled(self): self.assertRaises(ValueError, self._makeOne, ipv6=False, listen="[::]:8080") + def test_server_header_removable(self): + inst = self._makeOne(ident=None) + self.assertEqual(inst.ident, None) + + inst = self._makeOne(ident='') + self.assertEqual(inst.ident, None) + + inst = self._makeOne(ident='specific_header') + self.assertEqual(inst.ident, 'specific_header') + + class TestCLI(unittest.TestCase): def parse(self, argv): @@ -292,3 +445,21 @@ def test_bad_param(self): import getopt self.assertRaises(getopt.GetoptError, self.parse, ['--no-host']) + + +if hasattr(socket, 'AF_UNIX'): + class TestUnixSocket(unittest.TestCase): + def _makeOne(self, **kw): + from waitress.adjustments import Adjustments + return Adjustments(**kw) + + def test_dont_mix_internet_and_unix_sockets(self): + sockets = [ + socket.socket(socket.AF_INET, socket.SOCK_STREAM), + socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)] + self.assertRaises( + ValueError, + self._makeOne, + sockets=sockets) + sockets[0].close() + sockets[1].close() diff -Nru waitress-1.1.0/waitress/tests/test_functional.py waitress-1.2.0~b2/waitress/tests/test_functional.py --- waitress-1.1.0/waitress/tests/test_functional.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_functional.py 2019-01-02 20:46:09.000000000 +0000 @@ -155,7 +155,7 @@ self.assertTrue(headers.get('date')) def test_bad_host_header(self): - # http://corte.si/posts/code/pathod/pythonservers/index.html + # https://corte.si/posts/code/pathod/pythonservers/index.html to_send = ("GET / HTTP/1.0\n" " Host: 0\n\n") to_send = tobytes(to_send) diff -Nru waitress-1.1.0/waitress/tests/test_init.py waitress-1.2.0~b2/waitress/tests/test_init.py --- 
waitress-1.1.0/waitress/tests/test_init.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_init.py 2019-01-02 20:46:09.000000000 +0000 @@ -14,6 +14,7 @@ self.assertEqual(result, None) self.assertEqual(server.ran, True) + class Test_serve_paste(unittest.TestCase): def _callFUT(self, app, **kw): diff -Nru waitress-1.1.0/waitress/tests/test_parser.py waitress-1.2.0~b2/waitress/tests/test_parser.py --- waitress-1.1.0/waitress/tests/test_parser.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_parser.py 2019-01-02 20:46:09.000000000 +0000 @@ -286,7 +286,7 @@ self.assertEqual(result, [b'slam\tslim']) def test_get_header_lines_malformed(self): - # http://corte.si/posts/code/pathod/pythonservers/index.html + # https://corte.si/posts/code/pathod/pythonservers/index.html from waitress.parser import ParsingError self.assertRaises(ParsingError, self._callFUT, b' Host: localhost\r\n\r\n') diff -Nru waitress-1.1.0/waitress/tests/test_server.py waitress-1.2.0~b2/waitress/tests/test_server.py --- waitress-1.1.0/waitress/tests/test_server.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_server.py 2019-01-02 20:46:09.000000000 +0000 @@ -10,7 +10,7 @@ _dispatcher=None, adj=None, map=None, _start=True, _sock=None, _server=None): from waitress.server import create_server - return create_server( + self.inst = create_server( application, host=host, port=port, @@ -18,6 +18,7 @@ _dispatcher=_dispatcher, _start=_start, _sock=_sock) + return self.inst def _makeOneWithMap(self, adj=None, _start=True, host='127.0.0.1', port=0, app=dummy_app): @@ -40,15 +41,36 @@ task_dispatcher = DummyTaskDispatcher() map = {} from waitress.server import create_server - return create_server( + self.inst = create_server( app, listen=listen, map=map, _dispatcher=task_dispatcher, _start=_start, _sock=sock) + return self.inst + + def _makeWithSockets(self, application=dummy_app, _dispatcher=None, map=None, + _start=True, _sock=None, _server=None, sockets=None): + from waitress.server import create_server + _sockets = [] + if sockets is not None: + _sockets = sockets + self.inst = create_server( + application, + map=map, + _dispatcher=_dispatcher, + _start=_start, + _sock=_sock, + sockets=_sockets) + return self.inst + + def tearDown(self): + if self.inst is not None: + self.inst.close() def test_ctor_app_is_None(self): + self.inst = None self.assertRaises(ValueError, self._makeOneWithMap, app=None) def test_ctor_start_true(self): @@ -67,8 +89,7 @@ def test_get_server_name_empty(self): inst = self._makeOneWithMap(_start=False) - result = inst.get_server_name('') - self.assertTrue(result) + self.assertRaises(ValueError, inst.get_server_name, '') def test_get_server_name_with_ip(self): inst = self._makeOneWithMap(_start=False) @@ -83,7 +104,17 @@ def test_get_server_name_0000(self): inst = self._makeOneWithMap(_start=False) result = inst.get_server_name('0.0.0.0') - self.assertEqual(result, 'localhost') + self.assertTrue(len(result) != 0) + + def test_get_server_name_double_colon(self): + inst = self._makeOneWithMap(_start=False) + result = inst.get_server_name('::') + self.assertTrue(len(result) != 0) + + def test_get_server_name_ipv6(self): + inst = self._makeOneWithMap(_start=False) + result = inst.get_server_name('2001:DB8::ffff') + self.assertEqual('[2001:DB8::ffff]', result) def test_get_server_multi(self): inst = self._makeOneWithMulti() @@ -105,6 +136,7 @@ def test_pull_trigger(self): inst = self._makeOneWithMap(_start=False) + 
inst.trigger.close() inst.trigger = DummyTrigger() inst.pull_trigger() self.assertEqual(inst.trigger.pulled, True) @@ -215,10 +247,51 @@ from waitress.server import WSGIServer, TcpWSGIServer from waitress.adjustments import Adjustments self.assertTrue(WSGIServer is TcpWSGIServer) - inst = WSGIServer(None, _start=False, port=1234) + self.inst = WSGIServer(None, _start=False, port=1234) # Ensure the adjustment was actually applied. self.assertNotEqual(Adjustments.port, 1234) - self.assertEqual(inst.adj.port, 1234) + self.assertEqual(self.inst.adj.port, 1234) + + def test_create_with_one_tcp_socket(self): + from waitress.server import TcpWSGIServer + sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + sockets[0].bind(('127.0.0.1', 0)) + inst = self._makeWithSockets(_start=False, sockets=sockets) + self.assertTrue(isinstance(inst, TcpWSGIServer)) + + def test_create_with_multiple_tcp_sockets(self): + from waitress.server import MultiSocketServer + sockets = [ + socket.socket(socket.AF_INET, socket.SOCK_STREAM), + socket.socket(socket.AF_INET, socket.SOCK_STREAM)] + sockets[0].bind(('127.0.0.1', 0)) + sockets[1].bind(('127.0.0.1', 0)) + inst = self._makeWithSockets(_start=False, sockets=sockets) + self.assertTrue(isinstance(inst, MultiSocketServer)) + self.assertEqual(len(inst.effective_listen), 2) + + def test_create_with_one_socket_should_not_bind_socket(self): + innersock = DummySock() + sockets = [DummySock(acceptresult=(innersock, None))] + sockets[0].bind(('127.0.0.1', 80)) + sockets[0].bind_called = False + inst = self._makeWithSockets(_start=False, sockets=sockets) + self.assertEqual(inst.socket.bound, ('127.0.0.1', 80)) + self.assertFalse(inst.socket.bind_called) + + def test_create_with_one_socket_handle_accept_noerror(self): + innersock = DummySock() + sockets = [DummySock(acceptresult=(innersock, None))] + sockets[0].bind(('127.0.0.1', 80)) + inst = self._makeWithSockets(sockets=sockets) + L = [] + inst.channel_class = lambda *arg, **kw: L.append(arg) + inst.adj = DummyAdj + inst.handle_accept() + self.assertEqual(sockets[0].accepted, True) + self.assertEqual(innersock.opts, [('level', 'optname', 'value')]) + self.assertEqual(L, [(inst, innersock, None, inst.adj)]) + if hasattr(socket, 'AF_UNIX'): @@ -227,7 +300,7 @@ def _makeOne(self, _start=True, _sock=None): from waitress.server import create_server - return create_server( + self.inst = create_server( dummy_app, map={}, _start=_start, @@ -236,6 +309,25 @@ unix_socket=self.unix_socket, unix_socket_perms='600' ) + return self.inst + + def _makeWithSockets(self, application=dummy_app, _dispatcher=None, map=None, + _start=True, _sock=None, _server=None, sockets=None): + from waitress.server import create_server + _sockets = [] + if sockets is not None: + _sockets = sockets + self.inst = create_server( + application, + map=map, + _dispatcher=_dispatcher, + _start=_start, + _sock=_sock, + sockets=_sockets) + return self.inst + + def tearDown(self): + self.inst.close() def _makeDummy(self, *args, **kwargs): sock = DummySock(*args, **kwargs) @@ -268,26 +360,43 @@ def test_creates_new_sockinfo(self): from waitress.server import UnixWSGIServer - inst = UnixWSGIServer( + self.inst = UnixWSGIServer( dummy_app, unix_socket=self.unix_socket, unix_socket_perms='600' ) - self.assertEqual(inst.sockinfo[0], socket.AF_UNIX) + self.assertEqual(self.inst.sockinfo[0], socket.AF_UNIX) -class DummySock(object): + def test_create_with_unix_socket(self): + from waitress.server import MultiSocketServer, BaseWSGIServer, \ + TcpWSGIServer, 
UnixWSGIServer + sockets = [ + socket.socket(socket.AF_UNIX, socket.SOCK_STREAM), + socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)] + inst = self._makeWithSockets(sockets=sockets, _start=False) + self.assertTrue(isinstance(inst, MultiSocketServer)) + server = list(filter(lambda s: isinstance(s, BaseWSGIServer), inst.map.values())) + self.assertTrue(isinstance(server[0], UnixWSGIServer)) + self.assertTrue(isinstance(server[1], UnixWSGIServer)) + + +class DummySock(socket.socket): accepted = False blocking = False family = socket.AF_INET + type = socket.SOCK_STREAM + proto = 0 def __init__(self, toraise=None, acceptresult=(None, None)): self.toraise = toraise self.acceptresult = acceptresult self.bound = None self.opts = [] + self.bind_called = False def bind(self, addr): + self.bind_called = True self.bound = addr def accept(self): @@ -317,6 +426,9 @@ def getsockname(self): return self.bound + def close(self): + pass + class DummyTaskDispatcher(object): def __init__(self): @@ -358,6 +470,9 @@ def pull_trigger(self): self.pulled = True + def close(self): + pass + class DummyLogger(object): def __init__(self): diff -Nru waitress-1.1.0/waitress/tests/test_task.py waitress-1.2.0~b2/waitress/tests/test_task.py --- waitress-1.1.0/waitress/tests/test_task.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_task.py 2019-01-02 20:46:09.000000000 +0000 @@ -65,6 +65,15 @@ self.assertEqual(inst.queue.qsize(), 1) self.assertTrue(task.deferred) + def test_log_queue_depth(self): + task = DummyTask() + inst = self._makeOne() + inst.queue_logger = DummyLogger() + inst.add_task(task) + self.assertEqual(len(inst.queue_logger.logged), 0) + inst.add_task(task) + self.assertEqual(len(inst.queue_logger.logged), 1) + def test_add_task_defer_raises(self): task = DummyTask(ValueError) inst = self._makeOne() @@ -202,6 +211,57 @@ self.assertEqual(inst.close_on_finish, True) self.assertTrue(('Connection', 'close') in inst.response_headers) + def test_build_response_header_v11_204_no_content_length_or_transfer_encoding(self): + # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length + # for any response with a status code of 1xx or 204. + inst = self._makeOne() + inst.request = DummyParser() + inst.version = '1.1' + inst.status = '204 No Content' + result = inst.build_response_header() + lines = filter_lines(result) + self.assertEqual(len(lines), 4) + self.assertEqual(lines[0], b'HTTP/1.1 204 No Content') + self.assertEqual(lines[1], b'Connection: close') + self.assertTrue(lines[2].startswith(b'Date:')) + self.assertEqual(lines[3], b'Server: waitress') + self.assertEqual(inst.close_on_finish, True) + self.assertTrue(('Connection', 'close') in inst.response_headers) + + def test_build_response_header_v11_1xx_no_content_length_or_transfer_encoding(self): + # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length + # for any response with a status code of 1xx or 204. 
+ inst = self._makeOne() + inst.request = DummyParser() + inst.version = '1.1' + inst.status = '100 Continue' + result = inst.build_response_header() + lines = filter_lines(result) + self.assertEqual(len(lines), 4) + self.assertEqual(lines[0], b'HTTP/1.1 100 Continue') + self.assertEqual(lines[1], b'Connection: close') + self.assertTrue(lines[2].startswith(b'Date:')) + self.assertEqual(lines[3], b'Server: waitress') + self.assertEqual(inst.close_on_finish, True) + self.assertTrue(('Connection', 'close') in inst.response_headers) + + def test_build_response_header_v11_304_no_content_length_or_transfer_encoding(self): + # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length + # for any response with a status code of 1xx, 204 or 304. + inst = self._makeOne() + inst.request = DummyParser() + inst.version = '1.1' + inst.status = '304 Not Modified' + result = inst.build_response_header() + lines = filter_lines(result) + self.assertEqual(len(lines), 4) + self.assertEqual(lines[0], b'HTTP/1.1 304 Not Modified') + self.assertEqual(lines[1], b'Connection: close') + self.assertTrue(lines[2].startswith(b'Date:')) + self.assertEqual(lines[3], b'Server: waitress') + self.assertEqual(inst.close_on_finish, True) + self.assertTrue(('Connection', 'close') in inst.response_headers) + def test_build_response_header_via_added(self): inst = self._makeOne() inst.request = DummyParser() @@ -248,6 +308,12 @@ inst.remove_content_length_header() self.assertEqual(inst.response_headers, []) + def test_remove_content_length_header_with_other(self): + inst = self._makeOne() + inst.response_headers = [('Content-Length', '70'), ('Content-Type', 'text/html')] + inst.remove_content_length_header() + self.assertEqual(inst.response_headers, [('Content-Type', 'text/html')]) + def test_start(self): inst = self._makeOne() inst.start() @@ -518,6 +584,34 @@ self.assertEqual(inst.close_on_finish, True) self.assertEqual(len(inst.logger.logged), 0) + def test_execute_app_without_body_204_logged(self): + def app(environ, start_response): + start_response('204 No Content', [('Content-Length', '3')]) + return [b'abc'] + inst = self._makeOne() + inst.channel.server.application = app + inst.logger = DummyLogger() + inst.execute() + self.assertEqual(inst.close_on_finish, True) + self.assertNotIn(b'abc', inst.channel.written) + self.assertNotIn(b'Content-Length', inst.channel.written) + self.assertNotIn(b'Transfer-Encoding', inst.channel.written) + self.assertEqual(len(inst.logger.logged), 1) + + def test_execute_app_without_body_304_logged(self): + def app(environ, start_response): + start_response('304 Not Modified', [('Content-Length', '3')]) + return [b'abc'] + inst = self._makeOne() + inst.channel.server.application = app + inst.logger = DummyLogger() + inst.execute() + self.assertEqual(inst.close_on_finish, True) + self.assertNotIn(b'abc', inst.channel.written) + self.assertNotIn(b'Content-Length', inst.channel.written) + self.assertNotIn(b'Transfer-Encoding', inst.channel.written) + self.assertEqual(len(inst.logger.logged), 1) + def test_execute_app_returns_closeable(self): class closeable(list): def close(self): @@ -658,11 +752,13 @@ # nail the keys of environ self.assertEqual(sorted(environ.keys()), [ 'CONTENT_LENGTH', 'CONTENT_TYPE', 'HTTP_CONNECTION', 'HTTP_X_FOO', - 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REQUEST_METHOD', - 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', - 'SERVER_SOFTWARE', 'wsgi.errors', 'wsgi.file_wrapper', 'wsgi.input', + 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_HOST', 
+ 'REMOTE_PORT', 'REQUEST_METHOD', 'SCRIPT_NAME', 'SERVER_NAME', + 'SERVER_PORT', 'SERVER_PROTOCOL', 'SERVER_SOFTWARE', 'wsgi.errors', + 'wsgi.file_wrapper', 'wsgi.input', 'wsgi.input_terminated', 'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once', - 'wsgi.url_scheme', 'wsgi.version']) + 'wsgi.url_scheme', 'wsgi.version' + ]) self.assertEqual(environ['REQUEST_METHOD'], 'GET') self.assertEqual(environ['SERVER_PORT'], '80') @@ -674,6 +770,8 @@ self.assertEqual(environ['PATH_INFO'], '/') self.assertEqual(environ['QUERY_STRING'], 'abc') self.assertEqual(environ['REMOTE_ADDR'], '127.0.0.1') + self.assertEqual(environ['REMOTE_HOST'], '127.0.0.1') + self.assertEqual(environ['REMOTE_PORT'], '39830') self.assertEqual(environ['CONTENT_TYPE'], 'abc') self.assertEqual(environ['CONTENT_LENGTH'], '10') self.assertEqual(environ['HTTP_X_FOO'], 'BAR') @@ -684,6 +782,7 @@ self.assertEqual(environ['wsgi.multiprocess'], False) self.assertEqual(environ['wsgi.run_once'], False) self.assertEqual(environ['wsgi.input'], 'stream') + self.assertEqual(environ['wsgi.input_terminated'], True) self.assertEqual(inst.environ, environ) def test_get_environment_values_w_scheme_override_untrusted(self): @@ -704,8 +803,9 @@ def test_get_environment_values_w_scheme_override_trusted(self): import sys inst = self._makeOne() - inst.channel.addr = ['192.168.1.1'] + inst.channel.addr = ['192.168.1.1', 8080] inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_headers = {'x-forwarded-proto'} request = DummyParser() request.headers = { 'CONTENT_TYPE': 'abc', @@ -721,14 +821,16 @@ # nail the keys of environ self.assertEqual(sorted(environ.keys()), [ 'CONTENT_LENGTH', 'CONTENT_TYPE', 'HTTP_CONNECTION', 'HTTP_X_FOO', - 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REQUEST_METHOD', - 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', - 'SERVER_SOFTWARE', 'wsgi.errors', 'wsgi.file_wrapper', 'wsgi.input', - 'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once', - 'wsgi.url_scheme', 'wsgi.version']) + 'HTTP_X_FORWARDED_PROTO', 'PATH_INFO', 'QUERY_STRING', + 'REMOTE_ADDR', 'REMOTE_HOST', 'REQUEST_METHOD', 'SCRIPT_NAME', + 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', 'SERVER_SOFTWARE', + 'wsgi.errors', 'wsgi.file_wrapper', 'wsgi.input', + 'wsgi.input_terminated', 'wsgi.multiprocess', 'wsgi.multithread', + 'wsgi.run_once', 'wsgi.url_scheme', 'wsgi.version' + ]) self.assertEqual(environ['REQUEST_METHOD'], 'GET') - self.assertEqual(environ['SERVER_PORT'], '80') + self.assertEqual(environ['SERVER_PORT'], '443') self.assertEqual(environ['SERVER_NAME'], 'localhost') self.assertEqual(environ['SERVER_SOFTWARE'], 'waitress') self.assertEqual(environ['SERVER_PROTOCOL'], 'HTTP/1.0') @@ -747,12 +849,14 @@ self.assertEqual(environ['wsgi.multiprocess'], False) self.assertEqual(environ['wsgi.run_once'], False) self.assertEqual(environ['wsgi.input'], 'stream') + self.assertEqual(environ['wsgi.input_terminated'], True) self.assertEqual(inst.environ, environ) def test_get_environment_values_w_bogus_scheme_override(self): inst = self._makeOne() - inst.channel.addr = ['192.168.1.1'] + inst.channel.addr = ['192.168.1.1', 80] inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_headers = {'x-forwarded-proto'} request = DummyParser() request.headers = { 'CONTENT_TYPE': 'abc', @@ -765,6 +869,519 @@ inst.request = request self.assertRaises(ValueError, inst.get_environment) + def test_get_environment_warning_other_proxy_headers(self): + inst = self._makeOne() + 
inst.logger = DummyLogger() + + inst.request.headers = { + 'X_FORWARDED_FOR': '[2001:db8::1]', + 'FORWARDED': 'For=198.51.100.2;host=example.com:8080;proto=https' + } + inst.channel.addr = ['192.168.1.1', 80] + inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_count = 1 + inst.channel.server.adj.trusted_proxy_headers = {'forwarded'} + inst.channel.server.adj.log_untrusted_proxy_headers = True + environ = inst.get_environment() + + self.assertEqual(len(inst.logger.logged), 1) + self.assertNotIn('HTTP_X_FORWARDED_FOR', environ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_get_environment_contains_all_headers_including_untrusted(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + inst.request.headers = { + 'X_FORWARDED_FOR': '198.51.100.2', + 'X_FORWARDED_BY': 'Waitress', + 'X_FORWARDED_PROTO': 'https', + 'X_FORWARDED_HOST': 'example.org', + } + headers_orig = inst.request.headers.copy() + inst.channel.addr = ['192.168.1.1', 80] + inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_count = 1 + inst.channel.server.adj.trusted_proxy_headers = {'x-forwarded-by'} + inst.channel.server.adj.clear_untrusted_proxy_headers = False + environ = inst.get_environment() + + for k, expected in headers_orig.items(): + result = environ['HTTP_%s' % k] + self.assertEqual(result, expected) + + def test_get_environment_contains_only_trusted_headers(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + inst.request.headers = { + 'X_FORWARDED_FOR': '198.51.100.2', + 'X_FORWARDED_BY': 'Waitress', + 'X_FORWARDED_PROTO': 'https', + 'X_FORWARDED_HOST': 'example.org', + } + inst.channel.addr = ['192.168.1.1', 80] + inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_count = 1 + inst.channel.server.adj.trusted_proxy_headers = {'x-forwarded-by'} + inst.channel.server.adj.clear_untrusted_proxy_headers = True + environ = inst.get_environment() + + self.assertEqual(environ['HTTP_X_FORWARDED_BY'], 'Waitress') + self.assertNotIn('HTTP_X_FORWARDED_FOR', environ) + self.assertNotIn('HTTP_X_FORWARDED_PROTO', environ) + self.assertNotIn('HTTP_X_FORWARDED_HOST', environ) + + def test_get_environment_clears_headers_if_untrusted_proxy(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + inst.request.headers = { + 'X_FORWARDED_FOR': '198.51.100.2', + 'X_FORWARDED_BY': 'Waitress', + 'X_FORWARDED_PROTO': 'https', + 'X_FORWARDED_HOST': 'example.org', + } + inst.channel.addr = ['192.168.1.255', 80] + inst.channel.server.adj.trusted_proxy = '192.168.1.1' + inst.channel.server.adj.trusted_proxy_count = 1 + inst.channel.server.adj.trusted_proxy_headers = {'x-forwarded-by'} + inst.channel.server.adj.clear_untrusted_proxy_headers = True + environ = inst.get_environment() + + self.assertNotIn('HTTP_X_FORWARDED_BY', environ) + self.assertNotIn('HTTP_X_FORWARDED_FOR', environ) + self.assertNotIn('HTTP_X_FORWARDED_PROTO', environ) + self.assertNotIn('HTTP_X_FORWARDED_HOST', environ) + + def test_parse_proxy_headers_forwarded_for(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + 
trusted_proxy_headers={'x-forwarded-for'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '192.0.2.1') + + def test_parse_proxy_headers_forwarded_for_v6_missing_brackets(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '2001:db8::0' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-for'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '[2001:db8::0]') + + def test_parse_proxy_headers_forwared_for_multiple(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'x-forwarded-for'} + ) + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + + def test_parse_forwarded_multiple_proxies_trust_only_two(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For=192.0.2.1;host=fake.com, For=198.51.100.2;host=example.com:8080, For=203.0.113.1' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + + def test_parse_forwarded_multiple_proxies(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'for="[2001:db8::1]";host="example.com:8443";proto="https", for=192.0.2.1;host="example.internal:8080"' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '[2001:db8::1]') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8443') + self.assertEqual(environ['SERVER_PORT'], '8443') + self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_parse_forwarded_multiple_proxies_minimal(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'for="[2001:db8::1]";proto="https", for=192.0.2.1;host="example.org"' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '[2001:db8::1]') + self.assertEqual(environ['SERVER_NAME'], 'example.org') + self.assertEqual(environ['HTTP_HOST'], 'example.org') + self.assertEqual(environ['SERVER_PORT'], '443') + self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_parse_proxy_headers_forwarded_host_with_port(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com:8080', + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto', 'x-forwarded-host'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + + def test_parse_proxy_headers_forwarded_host_without_port(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com', + } + environ = {} + 
inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto', 'x-forwarded-host'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com') + self.assertEqual(environ['SERVER_PORT'], '80') + + def test_parse_proxy_headers_forwarded_host_with_forwarded_port(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com', + 'X_FORWARDED_PORT': '8080' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto', 'x-forwarded-host', 'x-forwarded-port'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + + def test_parse_proxy_headers_forwarded_host_multiple_with_forwarded_port(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com, example.org', + 'X_FORWARDED_PORT': '8080' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=2, + trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto', 'x-forwarded-host', 'x-forwarded-port'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + + def test_parse_proxy_headers_forwarded_host_multiple_with_forwarded_port_limit_one_trusted(self): + inst = self._makeOne() + + headers = { + 'X_FORWARDED_FOR': '192.0.2.1, 198.51.100.2, 203.0.113.1', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com, example.org', + 'X_FORWARDED_PORT': '8080' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-for', 'x-forwarded-proto', 'x-forwarded-host', 'x-forwarded-port'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '203.0.113.1') + self.assertEqual(environ['SERVER_NAME'], 'example.org') + self.assertEqual(environ['HTTP_HOST'], 'example.org:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + + def test_parse_forwarded(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For=198.51.100.2:5858;host=example.com:8080;proto=https' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['REMOTE_PORT'], '5858') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_parse_forwarded_empty_pair(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For=198.51.100.2;;proto=https;by=_unused' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + + def 
test_parse_forwarded_pair_token_whitespace(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For=198.51.100.2; proto =https' + } + environ = {} + + with self.assertRaises(ValueError): + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + def test_parse_forwarded_pair_value_whitespace(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For= "198.51.100.2"; proto =https' + } + environ = {} + + with self.assertRaises(ValueError): + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + def test_parse_forwarded_pair_no_equals(self): + inst = self._makeOne() + + headers = { + 'FORWARDED': 'For' + } + environ = {} + + with self.assertRaises(ValueError): + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + def test_parse_forwarded_warning_unknown_token(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'FORWARDED': 'For=198.51.100.2;host=example.com:8080;proto=https;unknown="yolo"' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'forwarded'} + ) + + self.assertEqual(len(inst.logger.logged), 1) + self.assertIn('Unknown Forwarded token', inst.logger.logged[0]) + + self.assertEqual(environ['REMOTE_ADDR'], '198.51.100.2') + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:8080') + self.assertEqual(environ['SERVER_PORT'], '8080') + self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_parse_no_valid_proxy_headers(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'X_FORWARDED_FOR': '198.51.100.2', + 'FORWARDED': 'For=198.51.100.2;host=example.com:8080;proto=https' + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + ) + + self.assertEqual(environ, {}) + + def test_parse_multiple_x_forwarded_proto(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'X_FORWARDED_PROTO': 'http, https', + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-proto'} + ) + + self.assertEqual(environ, {}) + self.assertEqual(len(inst.logger.logged), 1) + self.assertIn("Found multiple values in X-Forwarded-Proto", inst.logger.logged[0]) + + def test_parse_multiple_x_forwarded_port(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'X_FORWARDED_PORT': '443, 80', + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-port'} + ) + + self.assertEqual(environ, {}) + self.assertEqual(len(inst.logger.logged), 1) + self.assertIn("Found multiple values in X-Forwarded-Port", inst.logger.logged[0]) + + def test_parse_forwarded_port_wrong_proto_port_80(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'X_FORWARDED_PORT': '80', + 'X_FORWARDED_PROTO': 'https', + 'X_FORWARDED_HOST': 'example.com', + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-port', 'x-forwarded-host', 'x-forwarded-proto'} + ) + + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:80') + self.assertEqual(environ['SERVER_PORT'], '80') 
+ self.assertEqual(environ['wsgi.url_scheme'], 'https') + + def test_parse_forwarded_port_wrong_proto_port_443(self): + inst = self._makeOne() + inst.logger = DummyLogger() + + headers = { + 'X_FORWARDED_PORT': '443', + 'X_FORWARDED_PROTO': 'http', + 'X_FORWARDED_HOST': 'example.com', + } + environ = {} + inst.parse_proxy_headers( + environ, + headers, + trusted_proxy_count=1, + trusted_proxy_headers={'x-forwarded-port', 'x-forwarded-host', 'x-forwarded-proto'} + ) + + self.assertEqual(environ['SERVER_NAME'], 'example.com') + self.assertEqual(environ['HTTP_HOST'], 'example.com:443') + self.assertEqual(environ['SERVER_PORT'], '443') + self.assertEqual(environ['wsgi.url_scheme'], 'http') + + class TestErrorTask(unittest.TestCase): def _makeOne(self, channel=None, request=None): @@ -873,6 +1490,10 @@ port = 80 url_prefix = '' trusted_proxy = None + trusted_proxy_count = 1 + trusted_proxy_headers = set() + log_untrusted_proxy_headers = True + clear_untrusted_proxy_headers = True class DummyServer(object): server_name = 'localhost' @@ -885,7 +1506,7 @@ closed_when_done = False adj = DummyAdj() creation_time = 0 - addr = ['127.0.0.1'] + addr = ('127.0.0.1', 39830) def __init__(self, server=None): if server is None: @@ -924,8 +1545,8 @@ def __init__(self): self.logged = [] - def warning(self, msg): - self.logged.append(msg) + def warning(self, msg, *args): + self.logged.append(msg % args) def exception(self, msg): self.logged.append(msg) diff -Nru waitress-1.1.0/waitress/tests/test_trigger.py waitress-1.2.0~b2/waitress/tests/test_trigger.py --- waitress-1.1.0/waitress/tests/test_trigger.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_trigger.py 2019-01-02 20:46:09.000000000 +0000 @@ -8,15 +8,19 @@ def _makeOne(self, map): from waitress.trigger import trigger - return trigger(map) + self.inst = trigger(map) + return self.inst + + def tearDown(self): + self.inst.close() # prevent __del__ warning from file_dispatcher def test__close(self): map = {} inst = self._makeOne(map) - fd = os.open(os.path.abspath(__file__), os.O_RDONLY) - inst._fds = (fd,) + fd1, fd2 = inst._fds inst.close() - self.assertRaises(OSError, os.read, fd, 1) + self.assertRaises(OSError, os.read, fd1, 1) + self.assertRaises(OSError, os.read, fd2, 1) def test__physical_pull(self): map = {} diff -Nru waitress-1.1.0/waitress/tests/test_utilities.py waitress-1.2.0~b2/waitress/tests/test_utilities.py --- waitress-1.1.0/waitress/tests/test_utilities.py 2017-10-11 00:59:52.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_utilities.py 2019-01-02 20:46:09.000000000 +0000 @@ -89,21 +89,6 @@ def test_mixed(self): self.assertEqual(self._callFUT(b'\n\n00\r\n\r\n'), 2) -class Test_logging_dispatcher(unittest.TestCase): - - def _makeOne(self): - from waitress.utilities import logging_dispatcher - return logging_dispatcher(map={}) - - def test_log_info(self): - import logging - inst = self._makeOne() - logger = DummyLogger() - inst.logger = logger - inst.log_info('message', 'warning') - self.assertEqual(logger.severity, logging.WARN) - self.assertEqual(logger.message, 'message') - class TestBadRequest(unittest.TestCase): def _makeOne(self): @@ -114,8 +99,36 @@ inst = self._makeOne() self.assertEqual(inst.body, 1) -class DummyLogger(object): +class Test_undquote(unittest.TestCase): + + def _callFUT(self, value): + from waitress.utilities import undquote + return undquote(value) + + def test_empty(self): + self.assertEqual(self._callFUT(''), '') + + def test_quoted(self): + 
self.assertEqual(self._callFUT('"test"'), 'test') + + def test_unquoted(self): + self.assertEqual(self._callFUT('test'), 'test') + + def test_quoted_backslash_quote(self): + self.assertEqual(self._callFUT('"\\""'), '"') + + def test_quoted_htab(self): + self.assertEqual(self._callFUT("\"\t\""), "\t") + + def test_quoted_backslash_htab(self): + self.assertEqual(self._callFUT("\"\\\t\""), "\t") + + def test_quoted_backslash_invalid(self): + self.assertRaises(ValueError, self._callFUT, '"\\"') + + def test_invalid_quoting(self): + self.assertRaises(ValueError, self._callFUT, '"test') + + def test_invalid_quoting_single_quote(self): + self.assertRaises(ValueError, self._callFUT, '"') - def log(self, severity, message): - self.severity = severity - self.message = message diff -Nru waitress-1.1.0/waitress/tests/test_wasyncore.py waitress-1.2.0~b2/waitress/tests/test_wasyncore.py --- waitress-1.1.0/waitress/tests/test_wasyncore.py 1970-01-01 00:00:00.000000000 +0000 +++ waitress-1.2.0~b2/waitress/tests/test_wasyncore.py 2019-01-02 20:46:09.000000000 +0000 @@ -0,0 +1,1661 @@ +from waitress import wasyncore as asyncore +from waitress import compat +import contextlib +import functools +import gc +import unittest +import select +import os +import socket +import sys +import time +import errno +import re +import struct +import threading +import warnings + +from io import BytesIO + +TIMEOUT = 3 +HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX') +HOST = 'localhost' +HOSTv4 = "127.0.0.1" +HOSTv6 = "::1" + +# Filename used for testing +if os.name == 'java': # pragma: no cover + # Jython disallows @ in module names + TESTFN = '$test' +else: + TESTFN = '@test' + +TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid()) + +class DummyLogger(object): # pragma: no cover + def __init__(self): + self.messages = [] + + def log(self, severity, message): + self.messages.append((severity, message)) + +class WarningsRecorder(object): # pragma: no cover + """Convenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. + """ + def __init__(self, warnings_list): + self._warnings = warnings_list + self._last = 0 + + @property + def warnings(self): + return self._warnings[self._last:] + + def reset(self): + self._last = len(self._warnings) + + +def _filterwarnings(filters, quiet=False): # pragma: no cover + """Catch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. + """ + # Clear the warning registry of the calling module + # in order to re-raise the warnings. + frame = sys._getframe(2) + registry = frame.f_globals.get('__warningregistry__') + if registry: + registry.clear() + with warnings.catch_warnings(record=True) as w: + # Set filter "always" to record all warnings. Because + # test_warnings swap the module, we need to look up in + # the sys.modules dictionary. 
+ sys.modules['warnings'].simplefilter("always") + yield WarningsRecorder(w) + # Filter the recorded warnings + reraise = list(w) + missing = [] + for msg, cat in filters: + seen = False + for w in reraise[:]: + warning = w.message + # Filter out the matching messages + if (re.match(msg, str(warning), re.I) and + issubclass(warning.__class__, cat)): + seen = True + reraise.remove(w) + if not seen and not quiet: + # This filter caught nothing + missing.append((msg, cat.__name__)) + if reraise: + raise AssertionError("unhandled warning %s" % reraise[0]) + if missing: + raise AssertionError("filter (%r, %s) did not catch any warning" % + missing[0]) + + +@contextlib.contextmanager +def check_warnings(*filters, **kwargs): # pragma: no cover + """Context manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + """ + quiet = kwargs.get('quiet') + if not filters: + filters = (("", Warning),) + # Preserve backward compatibility + if quiet is None: + quiet = True + return _filterwarnings(filters, quiet) + +def gc_collect(): # pragma: no cover + """Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. + """ + gc.collect() + if sys.platform.startswith('java'): + time.sleep(0.1) + gc.collect() + gc.collect() + +def threading_setup(): # pragma: no cover + return (compat.thread._count(), None) + +def threading_cleanup(*original_values): # pragma: no cover + global environment_altered + + _MAX_COUNT = 100 + + for count in range(_MAX_COUNT): + values = (compat.thread._count(), None) + if values == original_values: + break + + if not count: + # Display a warning at the first iteration + environment_altered = True + sys.stderr.write( + "Warning -- threading_cleanup() failed to cleanup " + "%s threads" % (values[0] - original_values[0]) + ) + sys.stderr.flush() + + values = None + + time.sleep(0.01) + gc_collect() + + +def reap_threads(func): # pragma: no cover + """Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + """ + @functools.wraps(func) + def decorator(*args): + key = threading_setup() + try: + return func(*args) + finally: + threading_cleanup(*key) + return decorator + +def join_thread(thread, timeout=30.0): # pragma: no cover + """Join a thread. Raise an AssertionError if the thread is still alive + after timeout seconds. + """ + thread.join(timeout) + if thread.is_alive(): + msg = "failed to join the thread in %.1f seconds" % timeout + raise AssertionError(msg) + +def bind_port(sock, host=HOST): # pragma: no cover + """Bind the socket to a free port and return the port number. Relies on + ephemeral ports in order to ensure we are using an unbound port. This is + important as many tests may be running simultaneously, especially in a + buildbot environment. 
This method raises an exception if the sock.family + is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR + or SO_REUSEPORT set on it. Tests should *never* set these socket options + for TCP/IP sockets. The only case for setting these options is testing + multicasting via multiple UDP sockets. + + Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. + on Windows), it will be set on the socket. This will prevent anyone else + from bind()'ing to our host/port for the duration of the test. + """ + + if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM: + if hasattr(socket, 'SO_REUSEADDR'): + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1: + raise RuntimeError("tests should never set the SO_REUSEADDR " \ + "socket option on TCP/IP sockets!") + if hasattr(socket, 'SO_REUSEPORT'): + try: + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1: + raise RuntimeError( + "tests should never set the SO_REUSEPORT " \ + "socket option on TCP/IP sockets!") + except OSError: + # Python's socket module was compiled using modern headers + # thus defining SO_REUSEPORT but this process is running + # under an older kernel that does not support SO_REUSEPORT. + pass + if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + + sock.bind((host, 0)) + port = sock.getsockname()[1] + return port + +@contextlib.contextmanager +def closewrapper(sock): # pragma: no cover + try: + yield sock + finally: + sock.close() + +class dummysocket: # pragma: no cover + def __init__(self): + self.closed = False + + def close(self): + self.closed = True + + def fileno(self): + return 42 + + def setblocking(self, yesno): + self.isblocking = yesno + + def getpeername(self): + return 'peername' + +class dummychannel: # pragma: no cover + def __init__(self): + self.socket = dummysocket() + + def close(self): + self.socket.close() + +class exitingdummy: # pragma: no cover + def __init__(self): + pass + + def handle_read_event(self): + raise asyncore.ExitNow() + + handle_write_event = handle_read_event + handle_close = handle_read_event + handle_expt_event = handle_read_event + +class crashingdummy: + def __init__(self): + self.error_handled = False + + def handle_read_event(self): + raise Exception() + + handle_write_event = handle_read_event + handle_close = handle_read_event + handle_expt_event = handle_read_event + + def handle_error(self): + self.error_handled = True + +# used when testing senders; just collects what it gets until newline is sent +def capture_server(evt, buf, serv): # pragma no cover + try: + serv.listen(0) + conn, addr = serv.accept() + except socket.timeout: + pass + else: + n = 200 + start = time.time() + while n > 0 and time.time() - start < 3.0: + r, w, e = select.select([conn], [], [], 0.1) + if r: + n -= 1 + data = conn.recv(10) + # keep everything except for the newline terminator + buf.write(data.replace(b'\n', b'')) + if b'\n' in data: + break + time.sleep(0.01) + + conn.close() + finally: + serv.close() + evt.set() + +def bind_unix_socket(sock, addr): # pragma: no cover + """Bind a unix socket, raising SkipTest if PermissionError is raised.""" + assert sock.family == socket.AF_UNIX + try: + sock.bind(addr) + except PermissionError: + sock.close() + raise unittest.SkipTest('cannot bind AF_UNIX sockets') + +def bind_af_aware(sock, addr): + """Helper function to bind a socket according to its family.""" + if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX: + # Make 
sure the path doesn't exist. + unlink(addr) + bind_unix_socket(sock, addr) + else: + sock.bind(addr) + +if sys.platform.startswith("win"): # pragma: no cover + def _waitfor(func, pathname, waitall=False): + # Perform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on an i7@4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existence of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. + L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) +else: + _unlink = os.unlink + + +def unlink(filename): + try: + _unlink(filename) + except OSError: + pass + +def _is_ipv6_enabled(): # pragma: no cover + """Check whether IPv6 is enabled on this host.""" + if compat.HAS_IPV6: + sock = None + try: + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + sock.bind(('::1', 0)) + return True + except socket.error: + pass + finally: + if sock: + sock.close() + return False + +IPV6_ENABLED = _is_ipv6_enabled() + +class HelperFunctionTests(unittest.TestCase): + def test_readwriteexc(self): + # Check exception handling behavior of read, write and _exception + + # check that ExitNow exceptions in the object handler method + # bubbles all the way up through asyncore read/write/_exception calls + tr1 = exitingdummy() + self.assertRaises(asyncore.ExitNow, asyncore.read, tr1) + self.assertRaises(asyncore.ExitNow, asyncore.write, tr1) + self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1) + + # check that an exception other than ExitNow in the object handler + # method causes the handle_error method to get called + tr2 = crashingdummy() + asyncore.read(tr2) + self.assertEqual(tr2.error_handled, True) + + tr2 = crashingdummy() + asyncore.write(tr2) + self.assertEqual(tr2.error_handled, True) + + tr2 = crashingdummy() + asyncore._exception(tr2) + self.assertEqual(tr2.error_handled, True) + + # asyncore.readwrite uses constants in the select module that + # are not present in Windows systems (see this thread: + # http://mail.python.org/pipermail/python-list/2001-October/109973.html) + # These constants should be present as long as poll is available + + @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') + def test_readwrite(self): + # Check that correct methods are called by readwrite() + + attributes = ('read', 'expt', 'write', 'closed', 'error_handled') + + expected = ( + (select.POLLIN, 'read'), + (select.POLLPRI, 'expt'), + (select.POLLOUT, 'write'), + (select.POLLERR, 'closed'), + (select.POLLHUP, 'closed'), + (select.POLLNVAL, 'closed'), + ) + + class testobj: + def __init__(self): + self.read = 
False + self.write = False + self.closed = False + self.expt = False + self.error_handled = False + + def handle_read_event(self): + self.read = True + + def handle_write_event(self): + self.write = True + + def handle_close(self): + self.closed = True + + def handle_expt_event(self): + self.expt = True + + # def handle_error(self): + # self.error_handled = True + + for flag, expectedattr in expected: + tobj = testobj() + self.assertEqual(getattr(tobj, expectedattr), False) + asyncore.readwrite(tobj, flag) + + # Only the attribute modified by the routine we expect to be + # called should be True. + for attr in attributes: + self.assertEqual(getattr(tobj, attr), attr==expectedattr) + + # check that ExitNow exceptions in the object handler method + # bubbles all the way up through asyncore readwrite call + tr1 = exitingdummy() + self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag) + + # check that an exception other than ExitNow in the object handler + # method causes the handle_error method to get called + tr2 = crashingdummy() + self.assertEqual(tr2.error_handled, False) + asyncore.readwrite(tr2, flag) + self.assertEqual(tr2.error_handled, True) + + def test_closeall(self): + self.closeall_check(False) + + def test_closeall_default(self): + self.closeall_check(True) + + def closeall_check(self, usedefault): + # Check that close_all() closes everything in a given map + + l = [] + testmap = {} + for i in range(10): + c = dummychannel() + l.append(c) + self.assertEqual(c.socket.closed, False) + testmap[i] = c + + if usedefault: + socketmap = asyncore.socket_map + try: + asyncore.socket_map = testmap + asyncore.close_all() + finally: + testmap, asyncore.socket_map = asyncore.socket_map, socketmap + else: + asyncore.close_all(testmap) + + self.assertEqual(len(testmap), 0) + + for c in l: + self.assertEqual(c.socket.closed, True) + + def test_compact_traceback(self): + try: + raise Exception("I don't like spam!") + except: + real_t, real_v, real_tb = sys.exc_info() + r = asyncore.compact_traceback() + + (f, function, line), t, v, info = r + self.assertEqual(os.path.split(f)[-1], 'test_wasyncore.py') + self.assertEqual(function, 'test_compact_traceback') + self.assertEqual(t, real_t) + self.assertEqual(v, real_v) + self.assertEqual(info, '[%s|%s|%s]' % (f, function, line)) + + +class DispatcherTests(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + asyncore.close_all() + + def test_basic(self): + d = asyncore.dispatcher() + self.assertEqual(d.readable(), True) + self.assertEqual(d.writable(), True) + + def test_repr(self): + d = asyncore.dispatcher() + self.assertEqual( + repr(d), + '<waitress.wasyncore.dispatcher at %#x>' % id(d) + ) + + def test_log_info(self): + import logging + inst = asyncore.dispatcher(map={}) + logger = DummyLogger() + inst.logger = logger + inst.log_info('message', 'warning') + self.assertEqual(logger.messages, [(logging.WARN, 'message')]) + + def test_log(self): + import logging + inst = asyncore.dispatcher() + logger = DummyLogger() + inst.logger = logger + inst.log('message') + self.assertEqual(logger.messages, [(logging.DEBUG, 'message')]) + + def test_unhandled(self): + import logging + inst = asyncore.dispatcher() + logger = DummyLogger() + inst.logger = logger + + inst.handle_expt() + inst.handle_read() + inst.handle_write() + inst.handle_connect() + + expected = [(logging.WARN, 'unhandled incoming priority event'), + (logging.WARN, 'unhandled read event'), + (logging.WARN, 'unhandled write event'), + (logging.WARN, 'unhandled connect event')] +
self.assertEqual(logger.messages, expected) + + def test_strerror(self): + # refers to bug #8573 + err = asyncore._strerror(errno.EPERM) + if hasattr(os, 'strerror'): + self.assertEqual(err, os.strerror(errno.EPERM)) + err = asyncore._strerror(-1) + self.assertTrue(err != "") + + +class dispatcherwithsend_noread(asyncore.dispatcher_with_send): # pragma: no cover + def readable(self): + return False + + def handle_connect(self): + pass + + +class DispatcherWithSendTests(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + asyncore.close_all() + + @reap_threads + def test_send(self): + evt = threading.Event() + sock = socket.socket() + sock.settimeout(3) + port = bind_port(sock) + + cap = BytesIO() + args = (evt, cap, sock) + t = threading.Thread(target=capture_server, args=args) + t.start() + try: + # wait a little longer for the server to initialize (it sometimes + # refuses connections on slow machines without this wait) + time.sleep(0.2) + + data = b"Suppose there isn't a 16-ton weight?" + d = dispatcherwithsend_noread() + d.create_socket() + d.connect((HOST, port)) + + # give time for socket to connect + time.sleep(0.1) + + d.send(data) + d.send(data) + d.send(b'\n') + + n = 1000 + while d.out_buffer and n > 0: # pragma: no cover + asyncore.poll() + n -= 1 + + evt.wait() + + self.assertEqual(cap.getvalue(), data*2) + finally: + join_thread(t, timeout=TIMEOUT) + + +@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'), + 'asyncore.file_wrapper required') +class FileWrapperTest(unittest.TestCase): + def setUp(self): + self.d = b"It's not dead, it's sleeping!" + with open(TESTFN, 'wb') as file: + file.write(self.d) + + def tearDown(self): + unlink(TESTFN) + + def test_recv(self): + fd = os.open(TESTFN, os.O_RDONLY) + w = asyncore.file_wrapper(fd) + os.close(fd) + + self.assertNotEqual(w.fd, fd) + self.assertNotEqual(w.fileno(), fd) + self.assertEqual(w.recv(13), b"It's not dead") + self.assertEqual(w.read(6), b", it's") + w.close() + self.assertRaises(OSError, w.read, 1) + + def test_send(self): + d1 = b"Come again?" + d2 = b"I want to buy some cheese." 
+ fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND) + w = asyncore.file_wrapper(fd) + os.close(fd) + + w.write(d1) + w.send(d2) + w.close() + with open(TESTFN, 'rb') as file: + self.assertEqual(file.read(), self.d + d1 + d2) + + @unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'), + 'asyncore.file_dispatcher required') + def test_dispatcher(self): + fd = os.open(TESTFN, os.O_RDONLY) + data = [] + class FileDispatcher(asyncore.file_dispatcher): + def handle_read(self): + data.append(self.recv(29)) + FileDispatcher(fd) + os.close(fd) + asyncore.loop(timeout=0.01, use_poll=True, count=2) + self.assertEqual(b"".join(data), self.d) + + def test_resource_warning(self): + # Issue #11453 + got_warning = False + while got_warning is False: + # we try until we get the outcome we want because this + # test is not deterministic (gc_collect() may not + fd = os.open(TESTFN, os.O_RDONLY) + f = asyncore.file_wrapper(fd) + + os.close(fd) + + try: + with check_warnings(('', compat.ResourceWarning)): + f = None + gc_collect() + except AssertionError: # pragma: no cover + pass + else: + got_warning = True + + def test_close_twice(self): + fd = os.open(TESTFN, os.O_RDONLY) + f = asyncore.file_wrapper(fd) + os.close(fd) + + os.close(f.fd) # file_wrapper dupped fd + with self.assertRaises(OSError): + f.close() + + self.assertEqual(f.fd, -1) + # calling close twice should not fail + f.close() + + +class BaseTestHandler(asyncore.dispatcher): # pragma: no cover + + def __init__(self, sock=None): + asyncore.dispatcher.__init__(self, sock) + self.flag = False + + def handle_accept(self): + raise Exception("handle_accept not supposed to be called") + + def handle_accepted(self): + raise Exception("handle_accepted not supposed to be called") + + def handle_connect(self): + raise Exception("handle_connect not supposed to be called") + + def handle_expt(self): + raise Exception("handle_expt not supposed to be called") + + def handle_close(self): + raise Exception("handle_close not supposed to be called") + + def handle_error(self): + raise + + +class BaseServer(asyncore.dispatcher): + """A server which listens on an address and dispatches the + connection to a handler. 
+ """ + + def __init__(self, family, addr, handler=BaseTestHandler): + asyncore.dispatcher.__init__(self) + self.create_socket(family) + self.set_reuse_addr() + bind_af_aware(self.socket, addr) + self.listen(5) + self.handler = handler + + @property + def address(self): + return self.socket.getsockname() + + def handle_accepted(self, sock, addr): + self.handler(sock) + + def handle_error(self): # pragma: no cover + raise + + +class BaseClient(BaseTestHandler): + + def __init__(self, family, address): + BaseTestHandler.__init__(self) + self.create_socket(family) + self.connect(address) + + def handle_connect(self): + pass + + +class BaseTestAPI: + + def tearDown(self): + asyncore.close_all(ignore_all=True) + + def loop_waiting_for_flag(self, instance, timeout=5): # pragma: no cover + timeout = float(timeout) / 100 + count = 100 + while asyncore.socket_map and count > 0: + asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll) + if instance.flag: + return + count -= 1 + time.sleep(timeout) + self.fail("flag not set") + + def test_handle_connect(self): + # make sure handle_connect is called on connect() + + class TestClient(BaseClient): + def handle_connect(self): + self.flag = True + + server = BaseServer(self.family, self.addr) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_handle_accept(self): + # make sure handle_accept() is called when a client connects + + class TestListener(BaseTestHandler): + + def __init__(self, family, addr): + BaseTestHandler.__init__(self) + self.create_socket(family) + bind_af_aware(self.socket, addr) + self.listen(5) + self.address = self.socket.getsockname() + + def handle_accept(self): + self.flag = True + + server = TestListener(self.family, self.addr) + client = BaseClient(self.family, server.address) + self.loop_waiting_for_flag(server) + + def test_handle_accepted(self): + # make sure handle_accepted() is called when a client connects + + class TestListener(BaseTestHandler): + + def __init__(self, family, addr): + BaseTestHandler.__init__(self) + self.create_socket(family) + bind_af_aware(self.socket, addr) + self.listen(5) + self.address = self.socket.getsockname() + + def handle_accept(self): + asyncore.dispatcher.handle_accept(self) + + def handle_accepted(self, sock, addr): + sock.close() + self.flag = True + + server = TestListener(self.family, self.addr) + client = BaseClient(self.family, server.address) + self.loop_waiting_for_flag(server) + + + def test_handle_read(self): + # make sure handle_read is called on data received + + class TestClient(BaseClient): + def handle_read(self): + self.flag = True + + class TestHandler(BaseTestHandler): + def __init__(self, conn): + BaseTestHandler.__init__(self, conn) + self.send(b'x' * 1024) + + server = BaseServer(self.family, self.addr, TestHandler) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_handle_write(self): + # make sure handle_write is called + + class TestClient(BaseClient): + def handle_write(self): + self.flag = True + + server = BaseServer(self.family, self.addr) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_handle_close(self): + # make sure handle_close is called when the other end closes + # the connection + + class TestClient(BaseClient): + + def handle_read(self): + # in order to make handle_close be called we are supposed + # to make at least one recv() call + self.recv(1024) + + def handle_close(self): + self.flag = True + 
self.close() + + class TestHandler(BaseTestHandler): + def __init__(self, conn): + BaseTestHandler.__init__(self, conn) + self.close() + + server = BaseServer(self.family, self.addr, TestHandler) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_handle_close_after_conn_broken(self): + # Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and + # #11265). + + data = b'\0' * 128 + + class TestClient(BaseClient): + + def handle_write(self): + self.send(data) + + def handle_close(self): + self.flag = True + self.close() + + def handle_expt(self): # pragma: no cover + # needs to exist for MacOS testing + self.flag = True + self.close() + + class TestHandler(BaseTestHandler): + + def handle_read(self): + self.recv(len(data)) + self.close() + + def writable(self): + return False + + server = BaseServer(self.family, self.addr, TestHandler) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + @unittest.skipIf(sys.platform.startswith("sunos"), + "OOB support is broken on Solaris") + def test_handle_expt(self): + # Make sure handle_expt is called on OOB data received. + # Note: this might fail on some platforms as OOB data is + # tenuously supported and rarely used. + if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: + self.skipTest("Not applicable to AF_UNIX sockets.") + + if sys.platform == "darwin" and self.use_poll: # pragma: no cover + self.skipTest("poll may fail on macOS; see issue #28087") + + class TestClient(BaseClient): + def handle_expt(self): + self.socket.recv(1024, socket.MSG_OOB) + self.flag = True + + class TestHandler(BaseTestHandler): + def __init__(self, conn): + BaseTestHandler.__init__(self, conn) + self.socket.send( + compat.tobytes(chr(244)), socket.MSG_OOB + ) + + server = BaseServer(self.family, self.addr, TestHandler) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_handle_error(self): + + class TestClient(BaseClient): + def handle_write(self): + 1.0 / 0 + def handle_error(self): + self.flag = True + try: + raise + except ZeroDivisionError: + pass + else: # pragma: no cover + raise Exception("exception not raised") + + server = BaseServer(self.family, self.addr) + client = TestClient(self.family, server.address) + self.loop_waiting_for_flag(client) + + def test_connection_attributes(self): + server = BaseServer(self.family, self.addr) + client = BaseClient(self.family, server.address) + + # we start disconnected + self.assertFalse(server.connected) + self.assertTrue(server.accepting) + # this can't be taken for granted across all platforms + #self.assertFalse(client.connected) + self.assertFalse(client.accepting) + + # execute some loops so that client connects to server + asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100) + self.assertFalse(server.connected) + self.assertTrue(server.accepting) + self.assertTrue(client.connected) + self.assertFalse(client.accepting) + + # disconnect the client + client.close() + self.assertFalse(server.connected) + self.assertTrue(server.accepting) + self.assertFalse(client.connected) + self.assertFalse(client.accepting) + + # stop serving + server.close() + self.assertFalse(server.connected) + self.assertFalse(server.accepting) + + def test_create_socket(self): + s = asyncore.dispatcher() + s.create_socket(self.family) + #self.assertEqual(s.socket.type, socket.SOCK_STREAM) + self.assertEqual(s.socket.family, self.family) + self.assertEqual(s.socket.gettimeout(), 0) + 
#self.assertFalse(s.socket.get_inheritable()) + + def test_bind(self): + if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: + self.skipTest("Not applicable to AF_UNIX sockets.") + s1 = asyncore.dispatcher() + s1.create_socket(self.family) + s1.bind(self.addr) + s1.listen(5) + port = s1.socket.getsockname()[1] + + s2 = asyncore.dispatcher() + s2.create_socket(self.family) + # EADDRINUSE indicates the socket was correctly bound + self.assertRaises(socket.error, s2.bind, (self.addr[0], port)) + + def test_set_reuse_addr(self): # pragma: no cover + if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX: + self.skipTest("Not applicable to AF_UNIX sockets.") + + with closewrapper(socket.socket(self.family)) as sock: + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except OSError: + unittest.skip("SO_REUSEADDR not supported on this platform") + else: + # if SO_REUSEADDR succeeded for sock we expect asyncore + # to do the same + s = asyncore.dispatcher(socket.socket(self.family)) + self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR)) + s.socket.close() + s.create_socket(self.family) + s.set_reuse_addr() + self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR)) + + @reap_threads + def test_quick_connect(self): # pragma: no cover + # see: http://bugs.python.org/issue10340 + if self.family not in (socket.AF_INET, + getattr(socket, "AF_INET6", object())): + self.skipTest("test specific to AF_INET and AF_INET6") + + server = BaseServer(self.family, self.addr) + # run the thread 500 ms: the socket should be connected in 200 ms + t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, + count=5)) + t.start() + try: + sock = socket.socket(self.family, socket.SOCK_STREAM) + with closewrapper(sock) as s: + s.settimeout(.2) + s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, + struct.pack('ii', 1, 0)) + + try: + s.connect(server.address) + except OSError: + pass + finally: + join_thread(t, timeout=TIMEOUT) + +class TestAPI_UseIPv4Sockets(BaseTestAPI): + family = socket.AF_INET + addr = (HOST, 0) + +@unittest.skipUnless(IPV6_ENABLED, 'IPv6 support required') +class TestAPI_UseIPv6Sockets(BaseTestAPI): + family = socket.AF_INET6 + addr = (HOSTv6, 0) + +@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required') +class TestAPI_UseUnixSockets(BaseTestAPI): + if HAS_UNIX_SOCKETS: + family = socket.AF_UNIX + addr = TESTFN + + def tearDown(self): + unlink(self.addr) + BaseTestAPI.tearDown(self) + +class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase): + use_poll = False + +@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') +class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase): + use_poll = True + +class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase): + use_poll = False + +@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') +class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase): + use_poll = True + +class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase): + use_poll = False + +@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') +class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase): + use_poll = True + +class Test__strerror(unittest.TestCase): + def _callFUT(self, err): + from waitress.wasyncore import _strerror + return _strerror(err) + + def test_gardenpath(self): + self.assertEqual(self._callFUT(1), 'Operation not permitted') + + def test_unknown(self): + 
self.assertEqual(self._callFUT('wut'), 'Unknown error wut') + +class Test_read(unittest.TestCase): + def _callFUT(self, dispatcher): + from waitress.wasyncore import read + return read(dispatcher) + + def test_gardenpath(self): + inst = DummyDispatcher() + self._callFUT(inst) + self.assertTrue(inst.read_event_handled) + self.assertFalse(inst.error_handled) + + def test_reraised(self): + from waitress.wasyncore import ExitNow + inst = DummyDispatcher(ExitNow) + self.assertRaises(ExitNow,self._callFUT, inst) + self.assertTrue(inst.read_event_handled) + self.assertFalse(inst.error_handled) + + def test_non_reraised(self): + inst = DummyDispatcher(OSError) + self._callFUT(inst) + self.assertTrue(inst.read_event_handled) + self.assertTrue(inst.error_handled) + +class Test_write(unittest.TestCase): + def _callFUT(self, dispatcher): + from waitress.wasyncore import write + return write(dispatcher) + + def test_gardenpath(self): + inst = DummyDispatcher() + self._callFUT(inst) + self.assertTrue(inst.write_event_handled) + self.assertFalse(inst.error_handled) + + def test_reraised(self): + from waitress.wasyncore import ExitNow + inst = DummyDispatcher(ExitNow) + self.assertRaises(ExitNow,self._callFUT, inst) + self.assertTrue(inst.write_event_handled) + self.assertFalse(inst.error_handled) + + def test_non_reraised(self): + inst = DummyDispatcher(OSError) + self._callFUT(inst) + self.assertTrue(inst.write_event_handled) + self.assertTrue(inst.error_handled) + +class Test__exception(unittest.TestCase): + def _callFUT(self, dispatcher): + from waitress.wasyncore import _exception + return _exception(dispatcher) + + def test_gardenpath(self): + inst = DummyDispatcher() + self._callFUT(inst) + self.assertTrue(inst.expt_event_handled) + self.assertFalse(inst.error_handled) + + def test_reraised(self): + from waitress.wasyncore import ExitNow + inst = DummyDispatcher(ExitNow) + self.assertRaises(ExitNow,self._callFUT, inst) + self.assertTrue(inst.expt_event_handled) + self.assertFalse(inst.error_handled) + + def test_non_reraised(self): + inst = DummyDispatcher(OSError) + self._callFUT(inst) + self.assertTrue(inst.expt_event_handled) + self.assertTrue(inst.error_handled) + +@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') +class Test_readwrite(unittest.TestCase): + def _callFUT(self, obj, flags): + from waitress.wasyncore import readwrite + return readwrite(obj, flags) + + def test_handle_read_event(self): + flags = 0 + flags |= select.POLLIN + inst = DummyDispatcher() + self._callFUT(inst, flags) + self.assertTrue(inst.read_event_handled) + + def test_handle_write_event(self): + flags = 0 + flags |= select.POLLOUT + inst = DummyDispatcher() + self._callFUT(inst, flags) + self.assertTrue(inst.write_event_handled) + + def test_handle_expt_event(self): + flags = 0 + flags |= select.POLLPRI + inst = DummyDispatcher() + self._callFUT(inst, flags) + self.assertTrue(inst.expt_event_handled) + + def test_handle_close(self): + flags = 0 + flags |= select.POLLHUP + inst = DummyDispatcher() + self._callFUT(inst, flags) + self.assertTrue(inst.close_handled) + + def test_socketerror_not_in_disconnected(self): + flags = 0 + flags |= select.POLLIN + inst = DummyDispatcher(socket.error(errno.EALREADY, 'EALREADY')) + self._callFUT(inst, flags) + self.assertTrue(inst.read_event_handled) + self.assertTrue(inst.error_handled) + + def test_socketerror_in_disconnected(self): + flags = 0 + flags |= select.POLLIN + inst = DummyDispatcher(socket.error(errno.ECONNRESET, 'ECONNRESET')) + self._callFUT(inst, 
flags) + self.assertTrue(inst.read_event_handled) + self.assertTrue(inst.close_handled) + + def test_exception_in_reraised(self): + from waitress import wasyncore + flags = 0 + flags |= select.POLLIN + inst = DummyDispatcher(wasyncore.ExitNow) + self.assertRaises(wasyncore.ExitNow, self._callFUT, inst, flags) + self.assertTrue(inst.read_event_handled) + + def test_exception_not_in_reraised(self): + flags = 0 + flags |= select.POLLIN + inst = DummyDispatcher(ValueError) + self._callFUT(inst, flags) + self.assertTrue(inst.error_handled) + +class Test_poll(unittest.TestCase): + def _callFUT(self, timeout=0.0, map=None): + from waitress.wasyncore import poll + return poll(timeout, map) + + def test_nothing_writable_nothing_readable_but_map_not_empty(self): + # i read the mock.patch docs. nerp. + dummy_time = DummyTime() + map = {0:DummyDispatcher()} + try: + from waitress import wasyncore + old_time = wasyncore.time + wasyncore.time = dummy_time + result = self._callFUT(map=map) + finally: + wasyncore.time = old_time + self.assertEqual(result, None) + self.assertEqual(dummy_time.sleepvals, [0.0]) + + def test_select_raises_EINTR(self): + # i read the mock.patch docs. nerp. + dummy_select = DummySelect(select.error(errno.EINTR)) + disp = DummyDispatcher() + disp.readable = lambda: True + map = {0:disp} + try: + from waitress import wasyncore + old_select = wasyncore.select + wasyncore.select = dummy_select + result = self._callFUT(map=map) + finally: + wasyncore.select = old_select + self.assertEqual(result, None) + self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)]) + + def test_select_raises_non_EINTR(self): + # i read the mock.patch docs. nerp. + dummy_select = DummySelect(select.error(errno.EBADF)) + disp = DummyDispatcher() + disp.readable = lambda: True + map = {0:disp} + try: + from waitress import wasyncore + old_select = wasyncore.select + wasyncore.select = dummy_select + self.assertRaises(select.error, self._callFUT, map=map) + finally: + wasyncore.select = old_select + self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)]) + +class Test_poll2(unittest.TestCase): + def _callFUT(self, timeout=0.0, map=None): + from waitress.wasyncore import poll2 + return poll2(timeout, map) + + def test_select_raises_EINTR(self): + # i read the mock.patch docs. nerp. + pollster = DummyPollster(exc=select.error(errno.EINTR)) + dummy_select = DummySelect(pollster=pollster) + disp = DummyDispatcher() + map = {0:disp} + try: + from waitress import wasyncore + old_select = wasyncore.select + wasyncore.select = dummy_select + self._callFUT(map=map) + finally: + wasyncore.select = old_select + self.assertEqual(pollster.polled, [0.0]) + + def test_select_raises_non_EINTR(self): + # i read the mock.patch docs. nerp. 
+ pollster = DummyPollster(exc=select.error(errno.EBADF)) + dummy_select = DummySelect(pollster=pollster) + disp = DummyDispatcher() + map = {0:disp} + try: + from waitress import wasyncore + old_select = wasyncore.select + wasyncore.select = dummy_select + self.assertRaises(select.error, self._callFUT, map=map) + finally: + wasyncore.select = old_select + self.assertEqual(pollster.polled, [0.0]) + +class Test_dispatcher(unittest.TestCase): + def _makeOne(self, sock=None, map=None): + from waitress.wasyncore import dispatcher + return dispatcher(sock=sock, map=map) + + def test_unexpected_getpeername_exc(self): + sock = dummysocket() + def getpeername(): + raise socket.error(errno.EBADF) + map = {} + sock.getpeername = getpeername + self.assertRaises(socket.error, self._makeOne, sock=sock, map=map) + self.assertEqual(map, {}) + + def test___repr__accepting(self): + sock = dummysocket() + map = {} + inst = self._makeOne(sock=sock, map=map) + inst.accepting = True + inst.addr = ('localhost', 8080) + result = repr(inst) + expected = ' + +# ====================================================================== +# Copyright 1996 by Sam Rushing +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of Sam +# Rushing not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. +# +# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + +"""Basic infrastructure for asynchronous socket service clients and servers. + +There are only two ways to have a program on a single processor do "more +than one thing at a time". Multi-threaded programming is the simplest and +most popular way to do it, but there is another very different technique, +that lets you have nearly all the advantages of multi-threading, without +actually using multiple threads. it's really only practical if your program +is largely I/O bound. If your program is CPU bound, then pre-emptive +scheduled threads are probably what you really need. Network servers are +rarely CPU-bound, however. + +If your operating system supports the select() system call in its I/O +library (and nearly all do), then you can use it to juggle multiple +communication channels at once; doing other work while your I/O is taking +place in the "background." Although this strategy can seem strange and +complex, especially at first, it is in many ways easier to understand and +control than multi-threaded programming. The module documented here solves +many of the difficult problems for you, making the task of building +sophisticated high-performance network servers and clients a snap. 
+ +NB: this is a fork of asyncore from the stdlib that we've (the waitress +developers) named 'wasyncore' to ensure forward compatibility, as asyncore +in the stdlib will be dropped soon. It is neither a copy of the 2.7 asyncore +nor the 3.X asyncore; it is a version compatible with either 2.7 or 3.X. +""" + +from . import compat +from . import utilities + +import logging +import select +import socket +import sys +import time +import warnings + +import os +from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \ + ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, EINTR, \ + errorcode + +_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, + EBADF}) + +try: + socket_map +except NameError: + socket_map = {} + +def _strerror(err): + try: + return os.strerror(err) + except (TypeError, ValueError, OverflowError, NameError): + return "Unknown error %s" % err + +class ExitNow(Exception): + pass + +_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit) + +def read(obj): + try: + obj.handle_read_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def write(obj): + try: + obj.handle_write_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def _exception(obj): + try: + obj.handle_expt_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def readwrite(obj, flags): + try: + if flags & select.POLLIN: + obj.handle_read_event() + if flags & select.POLLOUT: + obj.handle_write_event() + if flags & select.POLLPRI: + obj.handle_expt_event() + if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): + obj.handle_close() + except socket.error as e: + if e.args[0] not in _DISCONNECTED: + obj.handle_error() + else: + obj.handle_close() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def poll(timeout=0.0, map=None): + if map is None: # pragma: no cover + map = socket_map + if map: + r = []; w = []; e = [] + for fd, obj in list(map.items()): # list() call FBO py3 + is_r = obj.readable() + is_w = obj.writable() + if is_r: + r.append(fd) + # accepting sockets should not be writable + if is_w and not obj.accepting: + w.append(fd) + if is_r or is_w: + e.append(fd) + if [] == r == w == e: + time.sleep(timeout) + return + + try: + r, w, e = select.select(r, w, e, timeout) + except select.error as err: + if err.args[0] != EINTR: + raise + else: + return + + for fd in r: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + read(obj) + + for fd in w: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + write(obj) + + for fd in e: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + _exception(obj) + +def poll2(timeout=0.0, map=None): + # Use the poll() support added to the select module in Python 2.0 + if map is None: # pragma: no cover + map = socket_map + if timeout is not None: + # timeout is in milliseconds + timeout = int(timeout*1000) + pollster = select.poll() + if map: + for fd, obj in list(map.items()): + flags = 0 + if obj.readable(): + flags |= select.POLLIN | select.POLLPRI + # accepting sockets should not be writable + if obj.writable() and not obj.accepting: + flags |= select.POLLOUT + if flags: + pollster.register(fd, flags) + + try: + r = pollster.poll(timeout) + except select.error as err: + if err.args[0] != EINTR: + raise + r = [] + + for fd, flags in r: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + readwrite(obj, flags) + +poll3 = poll2 # 
Alias for backward compatibility + +def loop(timeout=30.0, use_poll=False, map=None, count=None): + if map is None: # pragma: no cover + map = socket_map + + if use_poll and hasattr(select, 'poll'): + poll_fun = poll2 + else: + poll_fun = poll + + if count is None: # pragma: no cover + while map: + poll_fun(timeout, map) + + else: + while map and count > 0: + poll_fun(timeout, map) + count = count - 1 + +def compact_traceback(): + t, v, tb = sys.exc_info() + tbinfo = [] + if not tb: # pragma: no cover + raise AssertionError("traceback does not exist") + while tb: + tbinfo.append(( + tb.tb_frame.f_code.co_filename, + tb.tb_frame.f_code.co_name, + str(tb.tb_lineno) + )) + tb = tb.tb_next + + # just to be safe + del tb + + file, function, line = tbinfo[-1] + info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) + return (file, function, line), t, v, info + +class dispatcher: + + debug = False + connected = False + accepting = False + connecting = False + closing = False + addr = None + ignore_log_types = frozenset({'warning'}) + logger = utilities.logger + compact_traceback = staticmethod(compact_traceback) # for testing + + def __init__(self, sock=None, map=None): + if map is None: # pragma: no cover + self._map = socket_map + else: + self._map = map + + self._fileno = None + + if sock: + # Set to nonblocking just to make sure for cases where we + # get a socket from a blocking source. + sock.setblocking(0) + self.set_socket(sock, map) + self.connected = True + # The constructor no longer requires that the socket + # passed be connected. + try: + self.addr = sock.getpeername() + except socket.error as err: + if err.args[0] in (ENOTCONN, EINVAL): + # To handle the case where we got an unconnected + # socket. + self.connected = False + else: + # The socket is broken in some unknown way, alert + # the user and remove it from the map (to prevent + # polling of broken sockets). + self.del_channel(map) + raise + else: + self.socket = None + + def __repr__(self): + status = [self.__class__.__module__+"."+compat.qualname(self.__class__)] + if self.accepting and self.addr: + status.append('listening') + elif self.connected: + status.append('connected') + if self.addr is not None: + try: + status.append('%s:%d' % self.addr) + except TypeError: # pragma: no cover + status.append(repr(self.addr)) + return '<%s at %#x>' % (' '.join(status), id(self)) + + __str__ = __repr__ + + def add_channel(self, map=None): + #self.log_info('adding channel %s' % self) + if map is None: + map = self._map + map[self._fileno] = self + + def del_channel(self, map=None): + fd = self._fileno + if map is None: + map = self._map + if fd in map: + #self.log_info('closing channel %d:%s' % (fd, self)) + del map[fd] + self._fileno = None + + def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM): + self.family_and_type = family, type + sock = socket.socket(family, type) + sock.setblocking(0) + self.set_socket(sock) + + def set_socket(self, sock, map=None): + self.socket = sock + self._fileno = sock.fileno() + self.add_channel(map) + + def set_reuse_addr(self): + # try to re-use a server port if possible + try: + self.socket.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, + self.socket.getsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR) | 1 + ) + except socket.error: + pass + + # ================================================== + # predicates for select() + # these are used as filters for the lists of sockets + # to pass to select(). 
+ # ================================================== + + def readable(self): + return True + + def writable(self): + return True + + # ================================================== + # socket object methods. + # ================================================== + + def listen(self, num): + self.accepting = True + if os.name == 'nt' and num > 5: # pragma: no cover + num = 5 + return self.socket.listen(num) + + def bind(self, addr): + self.addr = addr + return self.socket.bind(addr) + + def connect(self, address): + self.connected = False + self.connecting = True + err = self.socket.connect_ex(address) + if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ + or err == EINVAL and os.name == 'nt': # pragma: no cover + self.addr = address + return + if err in (0, EISCONN): + self.addr = address + self.handle_connect_event() + else: + raise socket.error(err, errorcode[err]) + + def accept(self): + # XXX can return either an address pair or None + try: + conn, addr = self.socket.accept() + except TypeError: + return None + except socket.error as why: + if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN): + return None + else: + raise + else: + return conn, addr + + def send(self, data): + try: + result = self.socket.send(data) + return result + except socket.error as why: + if why.args[0] == EWOULDBLOCK: + return 0 + elif why.args[0] in _DISCONNECTED: + self.handle_close() + return 0 + else: + raise + + def recv(self, buffer_size): + try: + data = self.socket.recv(buffer_size) + if not data: + # a closed connection is indicated by signaling + # a read condition, and having recv() return 0. + self.handle_close() + return b'' + else: + return data + except socket.error as why: + # winsock sometimes raises ENOTCONN + if why.args[0] in _DISCONNECTED: + self.handle_close() + return b'' + else: + raise + + def close(self): + self.connected = False + self.accepting = False + self.connecting = False + self.del_channel() + if self.socket is not None: + try: + self.socket.close() + except socket.error as why: + if why.args[0] not in (ENOTCONN, EBADF): + raise + + # log and log_info may be overridden to provide more sophisticated + # logging and warning methods. In general, log is for 'hit' logging + # and 'log_info' is for informational, warning and error logging. + + def log(self, message): + self.logger.log(logging.DEBUG, message) + + def log_info(self, message, type='info'): + severity = { + 'info': logging.INFO, + 'warning': logging.WARN, + 'error': logging.ERROR, + } + self.logger.log(severity.get(type, logging.INFO), message) + + def handle_read_event(self): + if self.accepting: + # accepting sockets are never connected, they "spawn" new + # sockets that are connected + self.handle_accept() + elif not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_read() + else: + self.handle_read() + + def handle_connect_event(self): + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise socket.error(err, _strerror(err)) + self.handle_connect() + self.connected = True + self.connecting = False + + def handle_write_event(self): + if self.accepting: + # Accepting sockets shouldn't get a write event. + # We will pretend it didn't happen. 
+ return + + if not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_write() + + def handle_expt_event(self): + # handle_expt_event() is called if there might be an error on the + # socket, or if there is OOB data + # check for the error condition first + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # we can get here when select.select() says that there is an + # exceptional condition on the socket + # since there is an error, we'll go ahead and close the socket + # like we would in a subclassed handle_read() that received no + # data + self.handle_close() + else: + self.handle_expt() + + def handle_error(self): + nil, t, v, tbinfo = self.compact_traceback() + + # sometimes a user repr method will crash. + try: + self_repr = repr(self) + except: # pragma: no cover + self_repr = '<__repr__(self) failed for object at %0x>' % id(self) + + self.log_info( + 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( + self_repr, + t, + v, + tbinfo + ), + 'error' + ) + self.handle_close() + + def handle_expt(self): + self.log_info('unhandled incoming priority event', 'warning') + + def handle_read(self): + self.log_info('unhandled read event', 'warning') + + def handle_write(self): + self.log_info('unhandled write event', 'warning') + + def handle_connect(self): + self.log_info('unhandled connect event', 'warning') + + def handle_accept(self): + pair = self.accept() + if pair is not None: + self.handle_accepted(*pair) + + def handle_accepted(self, sock, addr): + sock.close() + self.log_info('unhandled accepted event', 'warning') + + def handle_close(self): + self.log_info('unhandled close event', 'warning') + self.close() + +# --------------------------------------------------------------------------- +# adds simple buffered output capability, useful for simple clients. +# [for more sophisticated usage use asynchat.async_chat] +# --------------------------------------------------------------------------- + +class dispatcher_with_send(dispatcher): + + def __init__(self, sock=None, map=None): + dispatcher.__init__(self, sock, map) + self.out_buffer = b'' + + def initiate_send(self): + num_sent = 0 + num_sent = dispatcher.send(self, self.out_buffer[:65536]) + self.out_buffer = self.out_buffer[num_sent:] + + handle_write = initiate_send + + def writable(self): + return (not self.connected) or len(self.out_buffer) + + def send(self, data): + if self.debug: # pragma: no cover + self.log_info('sending %s' % repr(data)) + self.out_buffer = self.out_buffer + data + self.initiate_send() + +def close_all(map=None, ignore_all=False): + if map is None: # pragma: no cover + map = socket_map + for x in list(map.values()): # list() FBO py3 + try: + x.close() + except socket.error as x: + if x.args[0] == EBADF: + pass + elif not ignore_all: + raise + except _reraised_exceptions: + raise + except: + if not ignore_all: + raise + map.clear() + +# Asynchronous File I/O: +# +# After a little research (reading man pages on various unixen, and +# digging through the linux kernel), I've determined that select() +# isn't meant for doing asynchronous file i/o. +# Heartening, though - reading linux/mm/filemap.c shows that linux +# supports asynchronous read-ahead. So _MOST_ of the time, the data +# will be sitting in memory for us already when we go to read it. +# +# What other OS's (besides NT) support async file i/o? [VMS?] +# +# Regardless, this is useful for pipes, and stdin/stdout... 
+ +if os.name == 'posix': + class file_wrapper: + # Here we override just enough to make a file + # look like a socket for the purposes of asyncore. + # The passed fd is automatically os.dup()'d + + def __init__(self, fd): + self.fd = os.dup(fd) + + def __del__(self): + if self.fd >= 0: + warnings.warn("unclosed file %r" % self, compat.ResourceWarning) + self.close() + + def recv(self, *args): + return os.read(self.fd, *args) + + def send(self, *args): + return os.write(self.fd, *args) + + def getsockopt(self, level, optname, buflen=None): # pragma: no cover + if (level == socket.SOL_SOCKET and + optname == socket.SO_ERROR and + not buflen): + return 0 + raise NotImplementedError("Only asyncore specific behaviour " + "implemented.") + + read = recv + write = send + + def close(self): + if self.fd < 0: + return + fd = self.fd + self.fd = -1 + os.close(fd) + + def fileno(self): + return self.fd + + class file_dispatcher(dispatcher): + + def __init__(self, fd, map=None): + dispatcher.__init__(self, None, map) + self.connected = True + try: + fd = fd.fileno() + except AttributeError: + pass + self.set_file(fd) + # set it to non-blocking mode + compat.set_nonblocking(fd) + + def set_file(self, fd): + self.socket = file_wrapper(fd) + self._fileno = self.socket.fileno() + self.add_channel() +
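
As a usage note on the API this diff introduces: the dispatcher/loop machinery above can be driven directly, much as the BaseServer/BaseClient test helpers do. The following is a minimal sketch only, assuming the vendored module is imported as waitress.wasyncore; the EchoServer/EchoHandler names and the 127.0.0.1:8080 address are illustrative and not part of waitress itself.

    # Minimal sketch: an echo server on the vendored waitress.wasyncore,
    # modelled on the BaseServer/BaseClient helpers in test_wasyncore.py.
    # EchoHandler/EchoServer and 127.0.0.1:8080 are illustrative names/values.
    import socket

    from waitress import wasyncore


    class EchoHandler(wasyncore.dispatcher_with_send):
        # Per-connection dispatcher: echo whatever the peer sends.
        def handle_read(self):
            data = self.recv(1024)
            if data:
                self.send(data)

        def handle_close(self):
            self.close()


    class EchoServer(wasyncore.dispatcher):
        def __init__(self, addr):
            wasyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind(addr)
            self.listen(5)

        def handle_accepted(self, sock, addr):
            # Each accepted connection gets its own dispatcher in socket_map.
            EchoHandler(sock)


    if __name__ == '__main__':
        EchoServer(('127.0.0.1', 8080))
        try:
            # Runs poll()/poll2() until the socket map is empty.
            wasyncore.loop(timeout=1.0, use_poll=False)
        except KeyboardInterrupt:
            pass
        finally:
            wasyncore.close_all(ignore_all=True)

The per-connection handler subclasses dispatcher_with_send rather than dispatcher so that writable() reports True only while buffered output remains, which avoids the base class logging "unhandled write event" on every loop iteration; the accepting server socket is never added to the write set, as poll() and poll2() above skip writability checks for accepting dispatchers.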