diff -Nru python-pika-0.10.0/appveyor.yml python-pika-0.11.0/appveyor.yml --- python-pika-0.10.0/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/appveyor.yml 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,86 @@ +# Windows build and test of Pika + +environment: + erlang_download_url: "http://erlang.org/download/otp_win64_18.3.exe" + erlang_exe_path: "C:\\Users\\appveyor\\erlang.exe" + erlang_home_dir: "C:\\Users\\appveyor\\erlang" + + rabbitmq_installer_download_url: "https://www.rabbitmq.com/releases/rabbitmq-server/v3.6.1/rabbitmq-server-3.6.1.exe" + rabbitmq_installer_path: "C:\\Users\\appveyor\\rabbitmq-server-3.6.1.exe" + + matrix: + - PYTHON_ARCH: "32" + PYTHONHOME: "C:\\Python27" + + +cache: + # RabbitMQ is a pretty big package, so caching it in hopes of expediting the + # runtime + - "%erlang_exe_path%" + - "%rabbitmq_installer_path%" + + +install: + - SET PYTHONPATH=%PYTHONHOME% + - SET PATH=%PYTHONHOME%\Scripts;%PYTHONHOME%;%PATH% + + # For diagnostics + - ECHO %PYTHONPATH% + - ECHO %PATH% + - python --version + + - ECHO Upgrading pip... + - python -m pip install --upgrade pip setuptools + - pip --version + + - ECHO Installing wheel... + - pip install wheel + + +build_script: + + - ECHO Building distributions... + - python setup.py sdist bdist bdist_wheel + - DIR /s *.whl + + +artifacts: + - path: 'dist\*.whl' + name: pika wheel + + +before_test: + # Install test requirements + + - ECHO Installing pika... + - python setup.py install + + - ECHO Installing pika test requirements... + - pip install -r test-requirements.txt + + # List conents of C:\ to help debug caching of rabbitmq artifacts + - DIR C:\ + + - ps: $webclient=New-Object System.Net.WebClient + + - ECHO Downloading Erlang... + - ps: if (-Not (Test-Path "$env:erlang_exe_path")) { $webclient.DownloadFile("$env:erlang_download_url", "$env:erlang_exe_path") } else { Write-Host "Found" $env:erlang_exe_path "in cache." } + + - ECHO Starting Erlang... 
+ - start /B /WAIT %erlang_exe_path% /S /D=%erlang_home_dir% + - set ERLANG_HOME=%erlang_home_dir% + + - ECHO Downloading RabbitMQ... + - ps: if (-Not (Test-Path "$env:rabbitmq_installer_path")) { $webclient.DownloadFile("$env:rabbitmq_installer_download_url", "$env:rabbitmq_installer_path") } else { Write-Host "Found" $env:rabbitmq_installer_path "in cache." } + + - ECHO Installing and starting RabbitMQ with default config... + - start /B /WAIT %rabbitmq_installer_path% /S + - ps: (Get-Service -Name RabbitMQ).Status + + +test_script: + - nosetests + + +# Not deploying Windows builds yet TODO +deploy: false diff -Nru python-pika-0.10.0/CHANGELOG.rst python-pika-0.11.0/CHANGELOG.rst --- python-pika-0.10.0/CHANGELOG.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/CHANGELOG.rst 2017-08-29 16:54:39.000000000 +0000 @@ -1,6 +1,62 @@ +0.11.0 2017-07-29 +----------------- + +`GitHub milestone `_ + + - Simplify Travis CI configuration for OS X. + - Add `asyncio` connection adapter for Python 3.4 and newer. + - Connection failures that occur after the socket is opened and before the + AMQP connection is ready to go are now reported by calling the connection + error callback. Previously these were not consistently reported. + - In BaseConnection.close, call _handle_ioloop_stop only if the connection is + already closed to allow the asynchronous close operation to complete + gracefully. + - Pass error information from failed socket connection to user callbacks + on_open_error_callback and on_close_callback with result_code=-1. + - ValueError is raised when a completion callback is passed to an asynchronous + (nowait) Channel operation. It's an application error to pass a non-None + completion callback with an asynchronous request, because this callback can + never be serviced in the asynchronous scenario. + - `Channel.basic_reject` fixed to allow `delivery_tag` to be of type `long` + as well as `int`. 
(by quantum5) + - Implemented support for blocked connection timeouts in + `pika.connection.Connection`. This feature is available to all pika adapters. + See `pika.connection.ConnectionParameters` docstring to learn more about + `blocked_connection_timeout` configuration. + - Deprecated the `heartbeat_interval` arg in `pika.ConnectionParameters` in + favor of the `heartbeat` arg for consistency with the other connection + parameters classes `pika.connection.Parameters` and `pika.URLParameters`. + - When the `port` arg is not set explicitly in `ConnectionParameters` + constructor, but the `ssl` arg is set explicitly, then set the port value to + to the default AMQP SSL port if SSL is enabled, otherwise to the default + AMQP plaintext port. + - `URLParameters` will raise ValueError if a non-empty URL scheme other than + {amqp | amqps | http | https} is specified. + - `InvalidMinimumFrameSize` and `InvalidMaximumFrameSize` exceptions are + deprecated. pika.connection.Parameters.frame_max property setter now raises + the standard `ValueError` exception when the value is out of bounds. + - Removed deprecated parameter `type` in `Channel.exchange_declare` and + `BlockingChannel.exchange_declare` in favor of the `exchange_type` arg that + doesn't overshadow the builtin `type` keyword. + - Channel.close() on OPENING channel transitions it to CLOSING instead of + raising ChannelClosed. + - Channel.close() on CLOSING channel raises `ChannelAlreadyClosing`; used to + raise `ChannelClosed`. + - Connection.channel() raises `ConnectionClosed` if connection is not in OPEN + state. + - When performing graceful close on a channel and `Channel.Close` from broker + arrives while waiting for CloseOk, don't release the channel number until + CloseOk arrives to avoid race condition that may lead to a new channel + receiving the CloseOk that was destined for the closing channel. 
+ - The `backpressure_detection` option of `ConnectionParameters` and + `URLParameters` property is DEPRECATED in favor of `Connection.Blocked` and + `Connection.Unblocked`. See `Connection.add_on_connection_blocked_callback`. + 0.10.0 2015-09-02 ----------------- +`0.10.0 `_ + - a9bf96d - LibevConnection: Fixed dict chgd size during iteration (Michael Laing) - 388c55d - SelectConnection: Fixed KeyError exceptions in IOLoop timeout executions (Shinji Suzuki) - 4780de3 - BlockingConnection: Add support to make BlockingConnection a Context Manager (@reddec) @@ -41,7 +97,7 @@ values for congruence with other similar callbacks. `BlockingConnection`: This adapter underwent a makeover under the hood and -gained significant performance improvements as well as ehnanced timer +gained significant performance improvements as well as enhanced timer resolution. It is now implemented as a client of the `SelectConnection` adapter. Below is an overview of the `BlockingConnection` and `BlockingChannel` API @@ -226,6 +282,8 @@ 0.9.14 - 2014-07-11 ------------------- +`0.9.14 `_ + - 57fe43e - fix test to generate a correct range of random ints (ml) - 0d68dee - fix async watcher for libev_connection (ml) - 01710ad - Use default username and password if not specified in URLParameters (Sean Dwyer) @@ -365,6 +423,9 @@ 0.9.13 - 2013-05-15 ------------------- + +`0.9.13 `_ + **Major Changes** - IPv6 Support with thanks to Alessandro Tagliapietra for initial prototype @@ -404,6 +465,8 @@ 0.9.12 - 2013-03-18 ------------------- +`0.9.12 `_ + **Bugfixes** - New timeout id hashing was not unique @@ -411,6 +474,8 @@ 0.9.11 - 2013-03-17 ------------------- +`0.9.11 `_ + **Bugfixes** - Address inconsistent channel close callback documentation and add the signature @@ -422,6 +487,8 @@ 0.9.10 - 2013-03-16 ------------------- +`0.9.10 `_ + **Bugfixes** - Fix timeout in twisted adapter (Submitted by cellscape) @@ -456,6 +523,8 @@ 0.9.9 - 2013-01-29 ------------------ +`0.9.9 `_ + **Bugfixes** - Only 
remove the tornado_connection.TornadoConnection file descriptor from the IOLoop if it's still open (Issue #221) @@ -479,6 +548,8 @@ 0.9.8 - 2012-11-18 ------------------ +`0.9.8 `_ + **Bugfixes** - Channel.queue_declare/BlockingChannel.queue_declare not setting up callbacks property for empty queue name (Issue #218) @@ -489,6 +560,8 @@ 0.9.7 - 2012-11-11 ------------------ +`0.9.7 `_ + **New features** - generator based consumer in BlockingChannel (See :doc:`examples/blocking_consumer_generator` for example) @@ -512,6 +585,8 @@ 0.9.6 - 2012-10-29 ------------------ +`0.9.6 `_ + **New features** - URLParameters @@ -537,6 +612,8 @@ 0.9.5 - 2011-03-29 ------------------ +`0.9.5 `_ + **Changelog** - Scope changes with adapter IOLoops and CallbackManager allowing for cleaner, multi-threaded operation diff -Nru python-pika-0.10.0/.checkignore python-pika-0.11.0/.checkignore --- python-pika-0.10.0/.checkignore 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/.checkignore 2017-08-29 16:54:39.000000000 +0000 @@ -1,5 +1,5 @@ -tests -utils -examples -docs +**/docs +**/examples +**/test +**/utils setup.py diff -Nru python-pika-0.10.0/.codeclimate.yml python-pika-0.11.0/.codeclimate.yml --- python-pika-0.10.0/.codeclimate.yml 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/.codeclimate.yml 2017-08-29 16:54:39.000000000 +0000 @@ -1,8 +1,8 @@ languages: - Python: true -exclude_paths: - - docs/* - - tests/* - - utils/* - - pika/examples/* - - pika/spec.py + - python +exclude_paths: + - docs/* + - tests/* + - utils/* + - pika/examples/* + - pika/spec.py diff -Nru python-pika-0.10.0/CONTRIBUTING.md python-pika-0.11.0/CONTRIBUTING.md --- python-pika-0.10.0/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/CONTRIBUTING.md 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,45 @@ +# Contributing + +## Test Coverage + +To contribute to Pika, please make sure that any new features or changes +to existing functionality **include test coverage**. 
+ +*Pull requests that add or change code without coverage have a much lower chance +of being accepted.* + + +## Prerequisites + +Pika test suite has a couple of requirements: + + * Dependencies from `test-dependencies.txt` are installed + * A RabbitMQ node with all defaults is running on `localhost:5672` + + +## Installing Dependencies + +To install the dependencies needed to run Pika tests, use + + pip install -r test-requirements.txt + +which on Python 3 might look like this + + pip3 install -r test-requirements.txt + + +## Running Tests + +To run all test suites, use + + nosetests + +Note that some tests are OS-specific (e.g. epoll on Linux +or kqueue on MacOS and BSD). Those will be skipped +automatically. + + +## Code Formatting + +Please format your code using [yapf](http://pypi.python.org/pypi/yapf) +with ``google`` style prior to issuing your pull request. diff -Nru python-pika-0.10.0/debian/changelog python-pika-0.11.0/debian/changelog --- python-pika-0.10.0/debian/changelog 2017-07-28 03:48:19.000000000 +0000 +++ python-pika-0.11.0/debian/changelog 2017-09-27 03:39:47.000000000 +0000 @@ -1,3 +1,15 @@ +python-pika (0.11.0-1) unstable; urgency=medium + + * Team upload. + * New upstream release (0.11.0) + * Update Homepage in debian/control + * Clean *.egg-info/* + * Build-depend on python3-sphinx, not python-sphinx + * Bump Standards-Version from 4.0.0 to 4.1.0 (no change required) + * Update debian/copyright Format: URL + + -- Christopher Hoskin Wed, 27 Sep 2017 04:39:47 +0100 + python-pika (0.10.0-3) unstable; urgency=medium * Team upload. 
diff -Nru python-pika-0.10.0/debian/clean python-pika-0.11.0/debian/clean --- python-pika-0.10.0/debian/clean 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/debian/clean 2017-09-27 03:37:11.000000000 +0000 @@ -0,0 +1 @@ +*.egg-info/* diff -Nru python-pika-0.10.0/debian/control python-pika-0.11.0/debian/control --- python-pika-0.10.0/debian/control 2017-07-28 03:43:15.000000000 +0000 +++ python-pika-0.11.0/debian/control 2017-09-27 03:37:11.000000000 +0000 @@ -10,7 +10,7 @@ python-mock, python-nose, python-setuptools, - python-sphinx | python3-sphinx, + python3-sphinx, python-tornado, python-twisted, python-yaml, @@ -20,10 +20,10 @@ python3-tornado, python3-twisted, python3-yaml -Standards-Version: 4.0.0 +Standards-Version: 4.1.0 X-Python-Version: >= 2.6 X-Python3-Version: >= 3.4 -Homepage: http://pika.github.com/ +Homepage: http://pika.readthedocs.io/en/latest/ Vcs-Git: https://anonscm.debian.org/git/python-modules/packages/python-pika.git Vcs-Browser: https://anonscm.debian.org/cgit/python-modules/packages/python-pika.git diff -Nru python-pika-0.10.0/debian/copyright python-pika-0.11.0/debian/copyright --- python-pika-0.10.0/debian/copyright 2017-07-27 23:04:58.000000000 +0000 +++ python-pika-0.11.0/debian/copyright 2017-09-27 03:37:11.000000000 +0000 @@ -1,4 +1,4 @@ -Format: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=174 +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: pika Upstream-Contact: Gavin M. 
Roy Source: https://github.com/pika/pika diff -Nru python-pika-0.10.0/debian/.git-dpm python-pika-0.11.0/debian/.git-dpm --- python-pika-0.10.0/debian/.git-dpm 2017-07-27 23:04:58.000000000 +0000 +++ python-pika-0.11.0/debian/.git-dpm 2017-09-27 03:37:11.000000000 +0000 @@ -1,11 +1,11 @@ # see git-dpm(1) from git-dpm package -de39689cab64d0c9505636479632e9ae739f8e30 -de39689cab64d0c9505636479632e9ae739f8e30 -02aaec8506024857a5c0e715b122cbe0ecb78982 -02aaec8506024857a5c0e715b122cbe0ecb78982 -python-pika_0.10.0.orig.tar.gz -a18c032bf7d2b47c6aaa0b17d6ec02c66b2d95a9 -178327 +60eafcb3a71aa6a017e272bd7d1c185a7529b628 +60eafcb3a71aa6a017e272bd7d1c185a7529b628 +60eafcb3a71aa6a017e272bd7d1c185a7529b628 +60eafcb3a71aa6a017e272bd7d1c185a7529b628 +python-pika_0.11.0.orig.tar.gz +7c6e28833ed503431142076f814040b5ca30fc22 +218888 debianTag="debian/%e%v" patchedTag="patched/%e%v" upstreamTag="upstream/%e%u" diff -Nru python-pika-0.10.0/debian/patches/0001-Fix-the-channel-tests.patch python-pika-0.11.0/debian/patches/0001-Fix-the-channel-tests.patch --- python-pika-0.10.0/debian/patches/0001-Fix-the-channel-tests.patch 2017-07-27 23:04:58.000000000 +0000 +++ python-pika-0.11.0/debian/patches/0001-Fix-the-channel-tests.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -From fd5e31cdae7cbaf9fb73a8c5c772fc2a17adea6d Mon Sep 17 00:00:00 2001 -From: "Gavin M. 
Roy" -Date: Wed, 4 Nov 2015 17:08:32 -0500 -Subject: Fix the channel tests - ---- - tests/unit/channel_tests.py | 25 +++++++++++++------------ - 1 file changed, 13 insertions(+), 12 deletions(-) - -diff --git a/tests/unit/channel_tests.py b/tests/unit/channel_tests.py -index 0ae9317..a80996a 100644 ---- a/tests/unit/channel_tests.py -+++ b/tests/unit/channel_tests.py -@@ -841,27 +841,28 @@ class ChannelTests(unittest.TestCase): - - def test_add_callbacks_basic_cancel_empty_added(self): - self.obj._add_callbacks() -- self.obj.callbacks.add.assert_any_calls(self.obj.channel_number, -- spec.Basic.Cancel, -- self.obj._on_getempty, False) -+ self.obj.callbacks.add.assert_any_call(self.obj.channel_number, -+ spec.Basic.Cancel, -+ self.obj._on_cancel, False) - - def test_add_callbacks_basic_get_empty_added(self): - self.obj._add_callbacks() -- self.obj.callbacks.add.assert_any_calls(self.obj.channel_number, -- spec.Basic.GetEmpty, -- self.obj._on_getempty, False) -+ print(self.obj.callbacks.add.__dict__) -+ self.obj.callbacks.add.assert_any_call(self.obj.channel_number, -+ spec.Basic.GetEmpty, -+ self.obj._on_getempty, False) - - def test_add_callbacks_channel_close_added(self): - self.obj._add_callbacks() -- self.obj.callbacks.add.assert_any_calls(self.obj.channel_number, -- spec.Channel.Close, -- self.obj._on_getempty, False) -+ self.obj.callbacks.add.assert_any_call(self.obj.channel_number, -+ spec.Channel.Close, -+ self.obj._on_close, True) - - def test_add_callbacks_channel_flow_added(self): - self.obj._add_callbacks() -- self.obj.callbacks.add.assert_any_calls(self.obj.channel_number, -- spec.Channel.Flow, -- self.obj._on_getempty, False) -+ self.obj.callbacks.add.assert_any_call(self.obj.channel_number, -+ spec.Channel.Flow, -+ self.obj._on_flow, False) - - def test_cleanup(self): - self.obj._cleanup() diff -Nru python-pika-0.10.0/debian/patches/0002-Make-the-test-more-reliable-in-Python-3.5.patch 
python-pika-0.11.0/debian/patches/0002-Make-the-test-more-reliable-in-Python-3.5.patch --- python-pika-0.10.0/debian/patches/0002-Make-the-test-more-reliable-in-Python-3.5.patch 2017-07-27 23:04:58.000000000 +0000 +++ python-pika-0.11.0/debian/patches/0002-Make-the-test-more-reliable-in-Python-3.5.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -From de39689cab64d0c9505636479632e9ae739f8e30 Mon Sep 17 00:00:00 2001 -From: "Gavin M. Roy" -Date: Wed, 4 Nov 2015 17:00:40 -0500 -Subject: Make the test more reliable in Python 3.5 - ---- - tests/unit/heartbeat_tests.py | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py -index 9ec496f..62aa777 100644 ---- a/tests/unit/heartbeat_tests.py -+++ b/tests/unit/heartbeat_tests.py -@@ -152,8 +152,11 @@ class HeartbeatTests(unittest.TestCase): - self.assertIsInstance(self.obj._new_heartbeat_frame(), frame.Heartbeat) - - def test_send_heartbeat_send_frame_called(self): -- self.obj._send_heartbeat_frame() -- self.mock_conn._send_frame.assert_called_once() -+ frame_value = self.obj._new_heartbeat_frame() -+ with mock.patch.object(self.obj, '_new_heartbeat_frame') as new_frame: -+ new_frame.return_value = frame_value -+ self.obj._send_heartbeat_frame() -+ self.mock_conn._send_frame.assert_called_once_with(frame_value) - - def test_send_heartbeat_counter_incremented(self): - self.obj._send_heartbeat_frame() diff -Nru python-pika-0.10.0/debian/patches/series python-pika-0.11.0/debian/patches/series --- python-pika-0.10.0/debian/patches/series 2017-07-27 23:04:58.000000000 +0000 +++ python-pika-0.11.0/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -0001-Fix-the-channel-tests.patch -0002-Make-the-test-more-reliable-in-Python-3.5.patch diff -Nru python-pika-0.10.0/debian/source/options python-pika-0.11.0/debian/source/options --- python-pika-0.10.0/debian/source/options 1970-01-01 00:00:00.000000000 +0000 +++ 
python-pika-0.11.0/debian/source/options 2017-09-27 03:37:11.000000000 +0000 @@ -0,0 +1 @@ +extend-diff-ignore="^[^/]+\.egg-info/" diff -Nru python-pika-0.10.0/docs/examples/asynchronous_publisher_example.rst python-pika-0.11.0/docs/examples/asynchronous_publisher_example.rst --- python-pika-0.10.0/docs/examples/asynchronous_publisher_example.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/asynchronous_publisher_example.rst 2017-08-29 16:54:39.000000000 +0000 @@ -11,7 +11,7 @@ import json LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' - '-35s %(lineno) -5d: %(message)s') + '-35s %(lineno) -5d: %(message)s') LOGGER = logging.getLogger(__name__) @@ -43,13 +43,14 @@ """ self._connection = None self._channel = None - self._deliveries = [] - self._acked = 0 - self._nacked = 0 - self._message_number = 0 + + self._deliveries = None + self._acked = None + self._nacked = None + self._message_number = None + self._stopping = False self._url = amqp_url - self._closing = False def connect(self): """This method connects to RabbitMQ, returning the connection handle. @@ -63,7 +64,8 @@ """ LOGGER.info('Connecting to %s', self._url) return pika.SelectConnection(pika.URLParameters(self._url), - self.on_connection_open, + on_open_callback=self.on_connection_open, + on_close_callback=self.on_connection_closed, stop_ioloop_on_close=False) def on_connection_open(self, unused_connection): @@ -75,17 +77,8 @@ """ LOGGER.info('Connection opened') - self.add_on_connection_close_callback() self.open_channel() - def add_on_connection_close_callback(self): - """This method adds an on close callback that will be invoked by pika - when RabbitMQ closes the connection to the publisher unexpectedly. 
- - """ - LOGGER.info('Adding connection close callback') - self._connection.add_on_close_callback(self.on_connection_closed) - def on_connection_closed(self, connection, reply_code, reply_text): """This method is invoked by pika when the connection to RabbitMQ is closed unexpectedly. Since it is unexpected, we will reconnect to @@ -97,31 +90,12 @@ """ self._channel = None - if self._closing: + if self._stopping: self._connection.ioloop.stop() else: LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', reply_code, reply_text) - self._connection.add_timeout(5, self.reconnect) - - def reconnect(self): - """Will be invoked by the IOLoop timer if the connection is - closed. See the on_connection_closed method. - - """ - self._deliveries = [] - self._acked = 0 - self._nacked = 0 - self._message_number = 0 - - # This is the old connection IOLoop instance, stop its ioloop - self._connection.ioloop.stop() - - # Create a new connection - self._connection = self.connect() - - # There is now a new connection, needs a new ioloop to run - self._connection.ioloop.start() + self._connection.add_timeout(5, self._connection.ioloop.stop) def open_channel(self): """This method will open a new channel with RabbitMQ by issuing the @@ -162,13 +136,14 @@ different parameters. In this case, we'll close the connection to shutdown the object. - :param pika.channel.Channel: The closed channel + :param pika.channel.Channel channel: The closed channel :param int reply_code: The numeric reason the channel was closed :param str reply_text: The text reason the channel was closed """ LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) - if not self._closing: + self._channel = None + if not self._stopping: self._connection.close() def setup_exchange(self, exchange_name): @@ -282,8 +257,6 @@ message to be delivered in PUBLISH_INTERVAL seconds. 
""" - if self._stopping: - return LOGGER.info('Scheduling next message for %0.1f seconds', self.PUBLISH_INTERVAL) self._connection.add_timeout(self.PUBLISH_INTERVAL, @@ -302,16 +275,17 @@ class. """ - if self._stopping: + if self._channel is None or not self._channel.is_open: return - message = {u'مفتاح': u' قيمة', - u'键': u'值', - u'キー': u'値'} + hdrs = {u'مفتاح': u' قيمة', + u'键': u'值', + u'キー': u'値'} properties = pika.BasicProperties(app_id='example-publisher', content_type='application/json', - headers=message) + headers=hdrs) + message = u'مفتاح قيمة 键 值 キー 値' self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, json.dumps(message, ensure_ascii=False), properties) @@ -320,21 +294,28 @@ LOGGER.info('Published message # %i', self._message_number) self.schedule_next_message() - def close_channel(self): - """Invoke this command to close the channel with RabbitMQ by sending - the Channel.Close RPC command. - - """ - LOGGER.info('Closing the channel') - if self._channel: - self._channel.close() - def run(self): """Run the example code by connecting and then starting the IOLoop. """ - self._connection = self.connect() - self._connection.ioloop.start() + while not self._stopping: + self._connection = None + self._deliveries = [] + self._acked = 0 + self._nacked = 0 + self._message_number = 0 + + try: + self._connection = self.connect() + self._connection.ioloop.start() + except KeyboardInterrupt: + self.stop() + if (self._connection is not None and + not self._connection.is_closed): + # Finish closing + self._connection.ioloop.start() + + LOGGER.info('Stopped') def stop(self): """Stop the example by closing the channel and connection. We @@ -349,14 +330,21 @@ self._stopping = True self.close_channel() self.close_connection() - self._connection.ioloop.start() - LOGGER.info('Stopped') + + def close_channel(self): + """Invoke this command to close the channel with RabbitMQ by sending + the Channel.Close RPC command. 
+ + """ + if self._channel is not None: + LOGGER.info('Closing the channel') + self._channel.close() def close_connection(self): """This method closes the connection to RabbitMQ.""" - LOGGER.info('Closing connection') - self._closing = True - self._connection.close() + if self._connection is not None: + LOGGER.info('Closing connection') + self._connection.close() def main(): @@ -364,11 +352,8 @@ # Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F) example = ExamplePublisher('amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600') - try: - example.run() - except KeyboardInterrupt: - example.stop() + example.run() + if __name__ == '__main__': main() - diff -Nru python-pika-0.10.0/docs/examples/asyncio_consumer.rst python-pika-0.11.0/docs/examples/asyncio_consumer.rst --- python-pika-0.10.0/docs/examples/asyncio_consumer.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/examples/asyncio_consumer.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,355 @@ +Asyncio Consumer +================ +The following example implements a consumer using the +:class:`Asyncio adapter ` for the +`Asyncio library `_ that will respond to RPC commands sent + from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shutdown if + RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is + very short and represents a individual actions that a consumer can do. + +consumer.py:: + + from pika import adapters + import pika + import logging + + LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') + + LOGGER = logging.getLogger(__name__) + + + class ExampleConsumer(object): + """This is an example consumer that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. + + If RabbitMQ closes the connection, it will reopen it. 
You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. + + If the channel is closed, it will indicate a problem with one of the + commands that were issued and that should surface in the output as well. + + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Create a new instance of the consumer class, passing in the AMQP + URL used to connect to RabbitMQ. + + :param str amqp_url: The AMQP url to connect with + + """ + self._connection = None + self._channel = None + self._closing = False + self._consumer_tag = None + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return adapters.AsyncioConnection(pika.URLParameters(self._url), + self.on_connection_open) + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + LOGGER.info('Closing connection') + self._connection.close() + + def add_on_connection_close_callback(self): + """This method adds an on close callback that will be invoked by pika + when RabbitMQ closes the connection to the publisher unexpectedly. + + """ + LOGGER.info('Adding connection close callback') + self._connection.add_on_close_callback(self.on_connection_closed) + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. 
+ + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._closing: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self.reconnect) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.add_on_connection_close_callback() + self.open_channel() + + def reconnect(self): + """Will be invoked by the IOLoop timer if the connection is + closed. See the on_connection_closed method. + + """ + if not self._closing: + + # Create a new connection + self._connection = self.connect() + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. 
+ + :param pika.channel.Channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel %i was closed: (%s) %s', + channel, reply_code, reply_text) + self._connection.close() + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. 
+ + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def add_on_cancel_callback(self): + """Add a callback that will be invoked if RabbitMQ cancels the consumer + for some reason. If RabbitMQ does cancel the consumer, + on_consumer_cancelled will be invoked by pika. + + """ + LOGGER.info('Adding consumer cancellation callback') + self._channel.add_on_cancel_callback(self.on_consumer_cancelled) + + def on_consumer_cancelled(self, method_frame): + """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer + receiving messages. + + :param pika.frame.Method method_frame: The Basic.Cancel frame + + """ + LOGGER.info('Consumer was cancelled remotely, shutting down: %r', + method_frame) + if self._channel: + self._channel.close() + + def acknowledge_message(self, delivery_tag): + """Acknowledge the message delivery from RabbitMQ by sending a + Basic.Ack RPC method for the delivery tag. + + :param int delivery_tag: The delivery tag from the Basic.Deliver frame + + """ + LOGGER.info('Acknowledging message %s', delivery_tag) + self._channel.basic_ack(delivery_tag) + + def on_message(self, unused_channel, basic_deliver, properties, body): + """Invoked by pika when a message is delivered from RabbitMQ. The + channel is passed for your convenience. 
The basic_deliver object that + is passed in carries the exchange, routing key, delivery tag and + a redelivered flag for the message. The properties passed in is an + instance of BasicProperties with the message properties and the body + is the message that was sent. + + :param pika.channel.Channel unused_channel: The channel object + :param pika.Spec.Basic.Deliver: basic_deliver method + :param pika.Spec.BasicProperties: properties + :param str|unicode body: The message body + + """ + LOGGER.info('Received message # %s from %s: %s', + basic_deliver.delivery_tag, properties.app_id, body) + self.acknowledge_message(basic_deliver.delivery_tag) + + def on_cancelok(self, unused_frame): + """This method is invoked by pika when RabbitMQ acknowledges the + cancellation of a consumer. At this point we will close the channel. + This will invoke the on_channel_closed method once the channel has been + closed, which will in-turn close the connection. + + :param pika.frame.Method unused_frame: The Basic.CancelOk frame + + """ + LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer') + self.close_channel() + + def stop_consuming(self): + """Tell RabbitMQ that you would like to stop consuming by sending the + Basic.Cancel RPC command. + + """ + if self._channel: + LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ') + self._channel.basic_cancel(self.on_cancelok, self._consumer_tag) + + def start_consuming(self): + """This method sets up the consumer by first calling + add_on_cancel_callback so that the object is notified if RabbitMQ + cancels the consumer. It then issues the Basic.Consume RPC command + which returns the consumer tag that is used to uniquely identify the + consumer with RabbitMQ. We keep the value to use it when we want to + cancel consuming. The on_message method is passed in as a callback pika + will invoke when a message is fully received. 
+ + """ + LOGGER.info('Issuing consumer related RPC commands') + self.add_on_cancel_callback() + self._consumer_tag = self._channel.basic_consume(self.on_message, + self.QUEUE) + + def on_bindok(self, unused_frame): + """Invoked by pika when the Queue.Bind method has completed. At this + point we will start consuming messages by calling start_consuming + which will invoke the needed RPC commands to start the process. + + :param pika.frame.Method unused_frame: The Queue.BindOk response frame + + """ + LOGGER.info('Queue bound') + self.start_consuming() + + def close_channel(self): + """Call to close the channel with RabbitMQ cleanly by issuing the + Channel.Close RPC command. + + """ + LOGGER.info('Closing the channel') + self._channel.close() + + def open_channel(self): + """Open a new channel with RabbitMQ by issuing the Channel.Open RPC + command. When RabbitMQ responds that the channel is open, the + on_channel_open callback will be invoked by pika. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def run(self): + """Run the example consumer by connecting to RabbitMQ and then + starting the IOLoop to block and allow the SelectConnection to operate. + + """ + self._connection = self.connect() + self._connection.ioloop.start() + + def stop(self): + """Cleanly shutdown the connection to RabbitMQ by stopping the consumer + with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok + will be invoked by pika, which will then closing the channel and + connection. The IOLoop is started again because this method is invoked + when CTRL-C is pressed raising a KeyboardInterrupt exception. This + exception stops the IOLoop which needs to be running for pika to + communicate with RabbitMQ. All of the commands issued prior to starting + the IOLoop will be buffered but not processed. 
+ + """ + LOGGER.info('Stopping') + self._closing = True + self.stop_consuming() + self._connection.ioloop.start() + LOGGER.info('Stopped') + + + def main(): + logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F') + try: + example.run() + except KeyboardInterrupt: + example.stop() + + + if __name__ == '__main__': + main() + diff -Nru python-pika-0.10.0/docs/examples/blocking_basic_get.rst python-pika-0.11.0/docs/examples/blocking_basic_get.rst --- python-pika-0.10.0/docs/examples/blocking_basic_get.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/blocking_basic_get.rst 2017-08-29 16:54:39.000000000 +0000 @@ -17,7 +17,7 @@ channel = connection.channel() method_frame, header_frame, body = channel.basic_get('test') if method_frame: - print method_frame, header_frame, body + print(method_frame, header_frame, body) channel.basic_ack(method_frame.delivery_tag) else: - print 'No message returned' + print('No message returned') diff -Nru python-pika-0.10.0/docs/examples/blocking_consumer_generator.rst python-pika-0.11.0/docs/examples/blocking_consumer_generator.rst --- python-pika-0.10.0/docs/examples/blocking_consumer_generator.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/blocking_consumer_generator.rst 2017-08-29 16:54:39.000000000 +0000 @@ -18,9 +18,9 @@ for method_frame, properties, body in channel.consume('test'): # Display the message parts - print method_frame - print properties - print body + print(method_frame) + print(properties) + print(body) # Acknowledge the message channel.basic_ack(method_frame.delivery_tag) @@ -31,7 +31,7 @@ # Cancel the consumer and return any pending messages requeued_messages = channel.cancel() - print 'Requeued %i messages' % requeued_messages + print('Requeued %i messages' % requeued_messages) # Close the channel and the connection channel.close() diff -Nru 
python-pika-0.10.0/docs/examples/blocking_consume.rst python-pika-0.11.0/docs/examples/blocking_consume.rst --- python-pika-0.10.0/docs/examples/blocking_consume.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/blocking_consume.rst 2017-08-29 16:54:39.000000000 +0000 @@ -9,21 +9,21 @@ Example of consuming messages and acknowledging them:: - import pika + import pika - def on_message(channel, method_frame, header_frame, body): - print method_frame.delivery_tag - print body - print - channel.basic_ack(delivery_tag=method_frame.delivery_tag) - - - connection = pika.BlockingConnection() - channel = connection.channel() - channel.basic_consume(on_message, 'test') - try: - channel.start_consuming() - except KeyboardInterrupt: - channel.stop_consuming() - connection.close() + def on_message(channel, method_frame, header_frame, body): + print(method_frame.delivery_tag) + print(body) + print() + channel.basic_ack(delivery_tag=method_frame.delivery_tag) + + + connection = pika.BlockingConnection() + channel = connection.channel() + channel.basic_consume(on_message, 'test') + try: + channel.start_consuming() + except KeyboardInterrupt: + channel.stop_consuming() + connection.close() \ No newline at end of file diff -Nru python-pika-0.10.0/docs/examples/blocking_delivery_confirmations.rst python-pika-0.11.0/docs/examples/blocking_delivery_confirmations.rst --- python-pika-0.10.0/docs/examples/blocking_delivery_confirmations.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/blocking_delivery_confirmations.rst 2017-08-29 16:54:39.000000000 +0000 @@ -23,6 +23,6 @@ body='Hello World!', properties=pika.BasicProperties(content_type='text/plain', delivery_mode=1)): - print 'Message publish was confirmed' + print('Message publish was confirmed') else: - print 'Message could not be confirmed' + print('Message could not be confirmed') diff -Nru python-pika-0.10.0/docs/examples/blocking_publish_mandatory.rst 
python-pika-0.11.0/docs/examples/blocking_publish_mandatory.rst --- python-pika-0.10.0/docs/examples/blocking_publish_mandatory.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/blocking_publish_mandatory.rst 2017-08-29 16:54:39.000000000 +0000 @@ -16,7 +16,7 @@ # Enabled delivery confirmations channel.confirm_delivery() - + # Send a message if channel.basic_publish(exchange='test', routing_key='test', @@ -24,6 +24,6 @@ properties=pika.BasicProperties(content_type='text/plain', delivery_mode=1), mandatory=True): - print 'Message was published' + print('Message was published') else: - print 'Message was returned' + print('Message was returned') diff -Nru python-pika-0.10.0/docs/examples/direct_reply_to.rst python-pika-0.11.0/docs/examples/direct_reply_to.rst --- python-pika-0.10.0/docs/examples/direct_reply_to.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/examples/direct_reply_to.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,81 @@ +Direct reply-to example +============================== +The following example demonstrates the use of the RabbitMQ "Direct reply-to" feature via `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html for more info about this feature. + +direct_reply_to.py:: + + # -*- coding: utf-8 -*- + + """ + This example demonstrates the RabbitMQ "Direct reply-to" usage via + `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html + for more info about this feature. + """ + import pika + + + SERVER_QUEUE = 'rpc.server.queue' + + + def main(): + """ Here, Client sends "Marco" to RPC Server, and RPC Server replies with + "Polo". + + NOTE Normally, the server would be running separately from the client, but + in this very simple example both are running in the same thread and sharing + connection and channel. 
+ + """ + with pika.BlockingConnection() as conn: + channel = conn.channel() + + # Set up server + + channel.queue_declare(queue=SERVER_QUEUE, + exclusive=True, + auto_delete=True) + channel.basic_consume(on_server_rx_rpc_request, queue=SERVER_QUEUE) + + + # Set up client + + # NOTE Client must create its consumer and publish RPC requests on the + # same channel to enable the RabbitMQ broker to make the necessary + # associations. + # + # Also, client must create the consumer *before* starting to publish the + # RPC requests. + # + # Client must create its consumer with no_ack=True, because the reply-to + # queue isn't real. + + channel.basic_consume(on_client_rx_reply_from_server, + queue='amq.rabbitmq.reply-to', + no_ack=True) + channel.basic_publish( + exchange='', + routing_key=SERVER_QUEUE, + body='Marco', + properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to')) + + channel.start_consuming() + + + def on_server_rx_rpc_request(ch, method_frame, properties, body): + print('RPC Server got request:', body) + + ch.basic_publish('', routing_key=properties.reply_to, body='Polo') + + ch.basic_ack(delivery_tag=method_frame.delivery_tag) + + print('RPC Server says good bye') + + + def on_client_rx_reply_from_server(ch, method_frame, properties, body): + print('RPC Client got reply:', body) + + # NOTE A real client might want to make additional RPC requests, but in this + # simple example we're closing the channel after getting our first reply + # to force control to return from channel.start_consuming() + print('RPC Client says bye') + ch.close() diff -Nru python-pika-0.10.0/docs/examples/heartbeat_and_blocked_timeouts.rst python-pika-0.11.0/docs/examples/heartbeat_and_blocked_timeouts.rst --- python-pika-0.10.0/docs/examples/heartbeat_and_blocked_timeouts.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/examples/heartbeat_and_blocked_timeouts.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,37 @@ +Ensuring well-behaved connection with 
heartbeat and blocked-connection timeouts +=============================================================================== + + +This example demonstrates explicit setting of heartbeat and blocked connection timeouts. + +Starting with RabbitMQ 3.5.5, the broker's default heartbeat timeout decreased from 580 seconds to 60 seconds. As a result, applications that perform lengthy processing in the same thread that also runs their Pika connection may experience unexpected dropped connections due to heartbeat timeout. Here, we specify an explicit lower bound for heartbeat timeout. + +When the RabbitMQ broker is running out of certain resources, such as memory and disk space, it may block connections that are performing resource-consuming operations, such as publishing messages. Once a connection is blocked, RabbitMQ stops reading from that connection's socket, so no commands from the client will get through to the broker on that connection until the broker unblocks it. A blocked connection may last for an indefinite period of time, stalling the connection and possibly resulting in a hang (e.g., in BlockingConnection) until the connection is unblocked. Blocked Connection Timeout is intended to interrupt (i.e., drop) a connection that has been blocked longer than the given timeout value. + +Example of configuring heartbeat and blocked-connection timeouts:: + + import pika + + + def main(): + + # NOTE: These parameters work with all Pika connection types + params = pika.ConnectionParameters(heartbeat_interval=600, +                                       blocked_connection_timeout=300) + + conn = pika.BlockingConnection(params) + + chan = conn.channel() + + chan.basic_publish('', 'my-alphabet-queue', "abc") + + # If publish causes the connection to become blocked, then this conn.close() + # would hang until the connection is unblocked, if ever. 
However, the + # blocked_connection_timeout connection parameter would interrupt the wait, + # resulting in ConnectionClosed exception from BlockingConnection (or the + # on_connection_closed callback call in an asynchronous adapter) + conn.close() + + + if __name__ == '__main__': + main() diff -Nru python-pika-0.10.0/docs/examples/tls_mutual_authentication.rst python-pika-0.11.0/docs/examples/tls_mutual_authentication.rst --- python-pika-0.10.0/docs/examples/tls_mutual_authentication.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/examples/tls_mutual_authentication.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,61 @@ +TLS parameters example +============================= +This example demonstrates a TLS session with RabbitMQ using mutual authentication. + +It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0` + +Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. The recent versions of RabbitMQ disable older versions of +SSL due to security vulnerabilities. + +See https://www.rabbitmq.com/ssl.html for certificate creation and rabbitmq SSL configuration instructions. + + +tls_example.py:: + + import ssl + import pika + import logging + + logging.basicConfig(level=logging.INFO) + + cp = pika.ConnectionParameters( + ssl=True, + ssl_options=dict( + ssl_version=ssl.PROTOCOL_TLSv1, + ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem", + keyfile="/Users/me/tls-gen/basic/client/key.pem", + certfile="/Users/me/tls-gen/basic/client/cert.pem", + cert_reqs=ssl.CERT_REQUIRED)) + + conn = pika.BlockingConnection(cp) + ch = conn.channel() + print(ch.queue_declare("sslq")) + ch.publish("", "sslq", "abc") + print(ch.basic_get("sslq")) + + +rabbitmq.config:: + + %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop. + %% + %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config. 
+ %% + %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config. + + + [ + {rabbit, + [ + {ssl_listeners, [{"127.0.0.1", 5671}]}, + + %% Configuring SSL. + %% See http://www.rabbitmq.com/ssl.html for full documentation. + %% + {ssl_options, [{cacertfile, "/Users/me/tls-gen/basic/testca/cacert.pem"}, + {certfile, "/Users/me/tls-gen/basic/server/cert.pem"}, + {keyfile, "/Users/me/tls-gen/basic/server/key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true}]} + ] + } + ]. diff -Nru python-pika-0.10.0/docs/examples/tls_server_uathentication.rst python-pika-0.11.0/docs/examples/tls_server_uathentication.rst --- python-pika-0.10.0/docs/examples/tls_server_uathentication.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/examples/tls_server_uathentication.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,60 @@ +TLS parameters example +============================= +This examples demonstrates a TLS session with RabbitMQ using server authentication. + +It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0` + +Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. The recent verions of RabbitMQ disable older versions of +SSL due to security vulnerabilities. + +See https://www.rabbitmq.com/ssl.html for certificate creation and rabbitmq SSL configuration instructions. 
+ + +tls_example.py:: + + import ssl + import pika + import logging + + logging.basicConfig(level=logging.INFO) + + cp = pika.ConnectionParameters( + ssl=True, + ssl_options=dict( + ssl_version=ssl.PROTOCOL_TLSv1, + ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem", + cert_reqs=ssl.CERT_REQUIRED)) + + conn = pika.BlockingConnection(cp) + ch = conn.channel() + print(ch.queue_declare("sslq")) + ch.publish("", "sslq", "abc") + print(ch.basic_get("sslq")) + + +rabbitmq.config:: + + %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop. + %% + %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config. + %% + %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config. + %% + %% Note that the {fail_if_no_peer_cert,false} option, states that RabbitMQ should accept clients that don't have a certificate to send to the broker, but through the {verify,verify_peer} option, we state that if the client does send a certificate to the broker, the broker must be able to establish a chain of trust to it. + + [ + {rabbit, + [ + {ssl_listeners, [{"127.0.0.1", 5671}]}, + + %% Configuring SSL. + %% See http://www.rabbitmq.com/ssl.html for full documentation. + %% + {ssl_options, [{cacertfile, "/Users/me/tls-gen/basic/testca/cacert.pem"}, + {certfile, "/Users/me/tls-gen/basic/server/cert.pem"}, + {keyfile, "/Users/me/tls-gen/basic/server/key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, false}]} + ] + } + ]. 
diff -Nru python-pika-0.10.0/docs/examples/twisted_example.rst python-pika-0.11.0/docs/examples/twisted_example.rst --- python-pika-0.10.0/docs/examples/twisted_example.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/twisted_example.rst 2017-08-29 16:54:39.000000000 +0000 @@ -36,7 +36,7 @@ ch,method,properties,body = yield queue_object.get() if body: - print body + print(body) yield ch.basic_ack(delivery_tag=method.delivery_tag) diff -Nru python-pika-0.10.0/docs/examples/using_urlparameters.rst python-pika-0.11.0/docs/examples/using_urlparameters.rst --- python-pika-0.10.0/docs/examples/using_urlparameters.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples/using_urlparameters.rst 2017-08-29 16:54:39.000000000 +0000 @@ -33,16 +33,16 @@ If you're looking to tweak other parameters, such as enabling heartbeats, simply add the key/value pair as a query string value. The following builds upon the SSL connection, enabling heartbeats every 30 seconds:: - amqps://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat_interval=30 + amqps://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat=30 Options that are available as query string values: - backpressure_detection: Pass in a value of *t* to enable backpressure detection, it is disabled by default. -- channel_max: Alter the default channel maximum by passing in a 32-bit integer value here -- connection_attempts: Alter the default of 1 connection attempt by passing in an integer value here [#f1]_. -- frame_max: Alter the default frame maximum size value by passing in a long integer value [#f2]_. -- heartbeat_interval: Pass a value greater than zero to enable heartbeats between the server and your application. The integer value you pass here will be the number of seconds between heartbeats. +- channel_max: Alter the default channel maximum by passing in a 32-bit integer value here. 
+- connection_attempts: Alter the default of 1 connection attempt by passing in an integer value here. +- frame_max: Alter the default frame maximum size value by passing in a long integer value [#f1]_. +- heartbeat: Pass a value greater than zero to enable heartbeats between the server and your application. The integer value you pass here will be the number of seconds between heartbeats. - locale: Set the locale of the client using underscore delimited posix Locale code in ll_CC format (en_US, pt_BR, de_DE). - retry_delay: The number of seconds to wait before attempting to reconnect on a failed connection, if connection_attempts is > 0. - socket_timeout: Change the default socket timeout duration from 0.25 seconds to another integer or float value. Adjust with caution. @@ -55,7 +55,7 @@ For an information on what the ssl_options can be set to reference the `official Python documentation `_. Here is an example of setting the client certificate and key:: - amqp://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat_interval=30&ssl_options=%7B%27keyfile%27%3A+%27%2Fetc%2Fssl%2Fmykey.pem%27%2C+%27certfile%27%3A+%27%2Fetc%2Fssl%2Fmycert.pem%27%7D + amqp://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat=30&ssl_options=%7B%27keyfile%27%3A+%27%2Fetc%2Fssl%2Fmykey.pem%27%2C+%27certfile%27%3A+%27%2Fetc%2Fssl%2Fmycert.pem%27%7D The following example demonstrates how to generate the ssl_options string with `Python's urllib `_:: @@ -65,5 +65,4 @@ .. rubric:: Footnotes -.. [#f1] The :py:class:`pika.adapters.blocking_connection.BlockingConnection` adapter does not respect the *connection_attempts* parameter. -.. [#f2] The AMQP specification states that a server can reject a request for a frame size larger than the value it passes during content negotiation. +.. [#f1] The AMQP specification states that a server can reject a request for a frame size larger than the value it passes during content negotiation. 
diff -Nru python-pika-0.10.0/docs/examples.rst python-pika-0.11.0/docs/examples.rst --- python-pika-0.10.0/docs/examples.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/examples.rst 2017-08-29 16:54:39.000000000 +0000 @@ -19,3 +19,5 @@ examples/asynchronous_publisher_example examples/twisted_example examples/tornado_consumer + examples/tls_mutual_authentication + examples/tls_server_authentication diff -Nru python-pika-0.10.0/docs/intro.rst python-pika-0.11.0/docs/intro.rst --- python-pika-0.10.0/docs/intro.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/intro.rst 2017-08-29 16:54:39.000000000 +0000 @@ -3,9 +3,9 @@ IO and Event Looping -------------------- -As AMQP is a two-way RPC protocol where the client can send requests to the server and the server can send requests to a client, Pika implements or extends IO loops in each of its asynchronous connection adapters. These IO loops are blocking methods which loop and listen for events. Each asynchronous adapters follows the same standard for invoking the IO loop. The IO loop is created when the connection adapter is created. To start an IO loop for any given adapter, call the ``connection.ioloop.start()`` method. +As AMQP is a two-way RPC protocol where the client can send requests to the server and the server can send requests to a client, Pika implements or extends IO loops in each of its asynchronous connection adapters. These IO loops are blocking methods which loop and listen for events. Each asynchronous adapter follows the same standard for invoking the IO loop. The IO loop is created when the connection adapter is created. To start an IO loop for any given adapter, call the ``connection.ioloop.start()`` method. -If you are using an external IO loop such as Tornado's :class:`~tornado.ioloop.IOLoop`, you invoke it as you normally would and then add the adapter to it. 
+If you are using an external IO loop such as Tornado's :class:`~tornado.ioloop.IOLoop` you invoke it normally and then add the Pika Tornado adapter to it. Example:: @@ -32,7 +32,7 @@ Continuation-Passing Style -------------------------- -Interfacing with Pika asynchronously is done by passing in callback methods you would like to have invoked when a certain event has completed. For example, if you are going to declare a queue, you pass in a method that will be called when the RabbitMQ server returns a `Queue.DeclareOk `_ response. +Interfacing with Pika asynchronously is done by passing in callback methods you would like to have invoked when a certain event completes. For example, if you are going to declare a queue, you pass in a method that will be called when the RabbitMQ server returns a `Queue.DeclareOk `_ response. In our example below we use the following four easy steps: @@ -40,7 +40,7 @@ #. When we are connected, the *on_connected* method is called. In that method we create a channel. #. When the channel is created, the *on_channel_open* method is called. In that method we declare a queue. #. When the queue is declared successfully, *on_queue_declared* is called. In that method we call :py:meth:`channel.basic_consume ` telling it to call the handle_delivery for each message RabbitMQ delivers to us. -#. When RabbitMQ has a message to send us, it call the handle_delivery method passing the AMQP Method frame, Header frame and Body. +#. When RabbitMQ has a message to send us, it calls the handle_delivery method passing the AMQP Method frame, Header frame, and Body. .. NOTE:: Step #1 is on line #28 and Step #2 is on line #6. This is so that Python knows about the functions we'll call in Steps #2 through #5. 
@@ -75,7 +75,7 @@ # Step #5 def handle_delivery(channel, method, header, body): """Called when we receive a message from RabbitMQ""" - print body + print(body) # Step #1: Connect to RabbitMQ using the default parameters parameters = pika.ConnectionParameters() diff -Nru python-pika-0.10.0/docs/modules/adapters/asyncio.rst python-pika-0.11.0/docs/modules/adapters/asyncio.rst --- python-pika-0.10.0/docs/modules/adapters/asyncio.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/docs/modules/adapters/asyncio.rst 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,9 @@ +asyncio Connection Adapter +========================== +.. automodule:: pika.adapters.asyncio_connection + +Be sure to check out the :doc:`asynchronous examples ` including the asyncio specific :doc:`consumer ` example. + +.. autoclass:: pika.adapters.asyncio_connection.AsyncioConnection + :members: + :inherited-members: diff -Nru python-pika-0.10.0/docs/version_history.rst python-pika-0.11.0/docs/version_history.rst --- python-pika-0.10.0/docs/version_history.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/docs/version_history.rst 2017-08-29 16:54:39.000000000 +0000 @@ -1,9 +1,70 @@ Version History =============== +Next Release +------------ + +0.11.0 2017-07-29 +----------------- + +`0.11.0 `_ + +`GitHub milestone `_ + + - Simplify Travis CI configuration for OS X. + - Add `asyncio` connection adapter for Python 3.4 and newer. + - Connection failures that occur after the socket is opened and before the + AMQP connection is ready to go are now reported by calling the connection + error callback. Previously these were not consistently reported. + - In BaseConnection.close, call _handle_ioloop_stop only if the connection is + already closed to allow the asynchronous close operation to complete + gracefully. + - Pass error information from failed socket connection to user callbacks + on_open_error_callback and on_close_callback with result_code=-1. 
+ - ValueError is raised when a completion callback is passed to an asynchronous + (nowait) Channel operation. It's an application error to pass a non-None + completion callback with an asynchronous request, because this callback can + never be serviced in the asynchronous scenario. + - `Channel.basic_reject` fixed to allow `delivery_tag` to be of type `long` + as well as `int`. (by quantum5) + - Implemented support for blocked connection timeouts in + `pika.connection.Connection`. This feature is available to all pika adapters. + See `pika.connection.ConnectionParameters` docstring to learn more about + `blocked_connection_timeout` configuration. + - Deprecated the `heartbeat_interval` arg in `pika.ConnectionParameters` in + favor of the `heartbeat` arg for consistency with the other connection + parameters classes `pika.connection.Parameters` and `pika.URLParameters`. + - When the `port` arg is not set explicitly in `ConnectionParameters` + constructor, but the `ssl` arg is set explicitly, then set the port value to + to the default AMQP SSL port if SSL is enabled, otherwise to the default + AMQP plaintext port. + - `URLParameters` will raise ValueError if a non-empty URL scheme other than + {amqp | amqps | http | https} is specified. + - `InvalidMinimumFrameSize` and `InvalidMaximumFrameSize` exceptions are + deprecated. pika.connection.Parameters.frame_max property setter now raises + the standard `ValueError` exception when the value is out of bounds. + - Removed deprecated parameter `type` in `Channel.exchange_declare` and + `BlockingChannel.exchnage_declare` in favor of the `exchange_type` arg that + doesn't overshadow the builtin `type` keyword. + - Channel.close() on OPENING channel transitions it to CLOSING instead of + raising ChannelClosed. + - Channel.close() on CLOSING channel raises `ChannelAlreadyClosing`; used to + raise `ChannelClosed`. + - Connection.channel() raises `ConnectionClosed` if connection is not in OPEN + state. 
+ - When performing graceful close on a channel and `Channel.Close` from broker + arrives while waiting for CloseOk, don't release the channel number until + CloseOk arrives to avoid race condition that may lead to a new channel + receiving the CloseOk that was destined for the closing channel. + - The `backpressure_detection` option of `ConnectionParameters` and + `URLParameters` property is DEPRECATED in favor of `Connection.Blocked` and + `Connection.Unblocked`. See `Connection.add_on_connection_blocked_callback`. + 0.10.0 2015-09-02 ----------------- +`0.10.0 `_ + - LibevConnection: Fixed dict chgd size during iteration (Michael Laing) - SelectConnection: Fixed KeyError exceptions in IOLoop timeout executions (Shinji Suzuki) - BlockingConnection: Add support to make BlockingConnection a Context Manager (@reddec) @@ -14,7 +75,7 @@ - f72b58f - Fixed failure to purge _ConsumerCancellationEvt from BlockingChannel._pending_events during basic_cancel. (Vitaly Kruglikov) 0.10.0b1 2015-07-10 ---------------------- +------------------- High-level summary of notable changes: @@ -188,7 +249,7 @@ - d235989 - Be more specific when calling getaddrinfo (Gavin M. Roy) - b5d1b31 - Reflect the method name change in pika.callback (Gavin M. Roy) - df7d3b7 - Cleanup BlockingConnection in a few places (Gavin M. Roy) - - cd99e1c - Rename method due to use in BlockingConnection (Gavin M. Roy) + - cd98e1c - Rename method due to use in BlockingConnection (Gavin M. Roy) - 7e0d1b3 - Use google style with yapf instead of pep8 (Gavin M. Roy) - 7dc9bab - Refactor socket writing to not use sendall #481 (Gavin M. Roy) - 4838789 - Dont log the fd #521 (Gavin M. 
Roy) @@ -233,6 +294,8 @@ 0.9.14 - 2014-07-11 ------------------- +`0.9.14 `_ + - 57fe43e - fix test to generate a correct range of random ints (ml) - 0d68dee - fix async watcher for libev_connection (ml) - 01710ad - Use default username and password if not specified in URLParameters (Sean Dwyer) @@ -372,6 +435,9 @@ 0.9.13 - 2013-05-15 ------------------- + +`0.9.13 `_ + **Major Changes** - IPv6 Support with thanks to Alessandro Tagliapietra for initial prototype @@ -411,6 +477,8 @@ 0.9.12 - 2013-03-18 ------------------- +`0.9.12 `_ + **Bugfixes** - New timeout id hashing was not unique @@ -418,6 +486,8 @@ 0.9.11 - 2013-03-17 ------------------- +`0.9.11 `_ + **Bugfixes** - Address inconsistent channel close callback documentation and add the signature @@ -429,6 +499,8 @@ 0.9.10 - 2013-03-16 ------------------- +`0.9.10 `_ + **Bugfixes** - Fix timeout in twisted adapter (Submitted by cellscape) @@ -463,6 +535,8 @@ 0.9.9 - 2013-01-29 ------------------ +`0.9.9 `_ + **Bugfixes** - Only remove the tornado_connection.TornadoConnection file descriptor from the IOLoop if it's still open (Issue #221) @@ -486,6 +560,8 @@ 0.9.8 - 2012-11-18 ------------------ +`0.9.8 `_ + **Bugfixes** - Channel.queue_declare/BlockingChannel.queue_declare not setting up callbacks property for empty queue name (Issue #218) @@ -496,6 +572,8 @@ 0.9.7 - 2012-11-11 ------------------ +`0.9.7 `_ + **New features** - generator based consumer in BlockingChannel (See :doc:`examples/blocking_consumer_generator` for example) @@ -519,6 +597,8 @@ 0.9.6 - 2012-10-29 ------------------ +`0.9.6 `_ + **New features** - URLParameters @@ -544,6 +624,8 @@ 0.9.5 - 2011-03-29 ------------------ +`0.9.5 `_ + **Changelog** - Scope changes with adapter IOLoops and CallbackManager allowing for cleaner, multi-threaded operation diff -Nru python-pika-0.10.0/examples/asynchronous_publisher_example.py python-pika-0.11.0/examples/asynchronous_publisher_example.py --- 
python-pika-0.10.0/examples/asynchronous_publisher_example.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/examples/asynchronous_publisher_example.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- + +import logging +import pika +import json + +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) + + +class ExamplePublisher(object): + """This is an example publisher that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. + + If RabbitMQ closes the connection, it will reopen it. You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. + + It uses delivery confirmations and illustrates one way to keep track of + messages that have been sent and if they've been confirmed by RabbitMQ. + + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + PUBLISH_INTERVAL = 1 + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Setup the example publisher object, passing in the URL we will use + to connect to RabbitMQ. + + :param str amqp_url: The URL for connecting to RabbitMQ + + """ + self._connection = None + self._channel = None + + self._deliveries = None + self._acked = None + self._nacked = None + self._message_number = None + + self._stopping = False + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. If you want the reconnection to work, make + sure you set stop_ioloop_on_close to False, which is not the default + behavior of this adapter. 
+ + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection(pika.URLParameters(self._url), + on_open_callback=self.on_connection_open, + on_close_callback=self.on_connection_closed, + stop_ioloop_on_close=False) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.open_channel() + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._stopping: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self._connection.ioloop.stop) + + def open_channel(self): + """This method will open a new channel with RabbitMQ by issuing the + Channel.Open RPC command. When RabbitMQ confirms the channel is open + by sending the Channel.OpenOK RPC reply, the on_channel_open method + will be invoked. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. 
+ + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) + self._channel = None + if not self._stopping: + self._connection.close() + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. 
+ + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def on_bindok(self, unused_frame): + """This method is invoked by pika when it receives the Queue.BindOk + response from RabbitMQ. Since we know we're now setup and bound, it's + time to start publishing.""" + LOGGER.info('Queue bound') + self.start_publishing() + + def start_publishing(self): + """This method will enable delivery confirmations and schedule the + first message to be sent to RabbitMQ + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.enable_delivery_confirmations() + self.schedule_next_message() + + def enable_delivery_confirmations(self): + """Send the Confirm.Select RPC method to RabbitMQ to enable delivery + confirmations on the channel. The only way to turn this off is to close + the channel and create a new one. 
+ + When the message is confirmed from RabbitMQ, the + on_delivery_confirmation method will be invoked passing in a Basic.Ack + or Basic.Nack method from RabbitMQ that will indicate which messages it + is confirming or rejecting. + + """ + LOGGER.info('Issuing Confirm.Select RPC command') + self._channel.confirm_delivery(self.on_delivery_confirmation) + + def on_delivery_confirmation(self, method_frame): + """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC + command, passing in either a Basic.Ack or Basic.Nack frame with + the delivery tag of the message that was published. The delivery tag + is an integer counter indicating the message number that was sent + on the channel via Basic.Publish. Here we're just doing house keeping + to keep track of stats and remove message numbers that we expect + a delivery confirmation of from the list used to keep track of messages + that are pending confirmation. + + :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame + + """ + confirmation_type = method_frame.method.NAME.split('.')[1].lower() + LOGGER.info('Received %s for delivery tag: %i', + confirmation_type, + method_frame.method.delivery_tag) + if confirmation_type == 'ack': + self._acked += 1 + elif confirmation_type == 'nack': + self._nacked += 1 + self._deliveries.remove(method_frame.method.delivery_tag) + LOGGER.info('Published %i messages, %i have yet to be confirmed, ' + '%i were acked and %i were nacked', + self._message_number, len(self._deliveries), + self._acked, self._nacked) + + def schedule_next_message(self): + """If we are not closing our connection to RabbitMQ, schedule another + message to be delivered in PUBLISH_INTERVAL seconds. 
+ + """ + LOGGER.info('Scheduling next message for %0.1f seconds', + self.PUBLISH_INTERVAL) + self._connection.add_timeout(self.PUBLISH_INTERVAL, + self.publish_message) + + def publish_message(self): + """If the class is not stopping, publish a message to RabbitMQ, + appending a list of deliveries with the message number that was sent. + This list will be used to check for delivery confirmations in the + on_delivery_confirmations method. + + Once the message has been sent, schedule another message to be sent. + The main reason I put scheduling in was just so you can get a good idea + of how the process is flowing by slowing down and speeding up the + delivery intervals by changing the PUBLISH_INTERVAL constant in the + class. + + """ + if self._channel is None or not self._channel.is_open: + return + + hdrs = {u'مفتاح': u' قيمة', + u'键': u'值', + u'キー': u'値'} + properties = pika.BasicProperties(app_id='example-publisher', + content_type='application/json', + headers=hdrs) + + message = u'مفتاح قيمة 键 值 キー 値' + self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, + json.dumps(message, ensure_ascii=False), + properties) + self._message_number += 1 + self._deliveries.append(self._message_number) + LOGGER.info('Published message # %i', self._message_number) + self.schedule_next_message() + + def run(self): + """Run the example code by connecting and then starting the IOLoop. + + """ + while not self._stopping: + self._connection = None + self._deliveries = [] + self._acked = 0 + self._nacked = 0 + self._message_number = 0 + + try: + self._connection = self.connect() + self._connection.ioloop.start() + except KeyboardInterrupt: + self.stop() + if (self._connection is not None and + not self._connection.is_closed): + # Finish closing + self._connection.ioloop.start() + + LOGGER.info('Stopped') + + def stop(self): + """Stop the example by closing the channel and connection. We + set a flag here so that we stop scheduling new messages to be + published. 
The IOLoop is started because this method is + invoked by the Try/Catch below when KeyboardInterrupt is caught. + Starting the IOLoop again will allow the publisher to cleanly + disconnect from RabbitMQ. + + """ + LOGGER.info('Stopping') + self._stopping = True + self.close_channel() + self.close_connection() + + def close_channel(self): + """Invoke this command to close the channel with RabbitMQ by sending + the Channel.Close RPC command. + + """ + if self._channel is not None: + LOGGER.info('Closing the channel') + self._channel.close() + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + if self._connection is not None: + LOGGER.info('Closing connection') + self._connection.close() + + +def main(): + logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + + # Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F) + example = ExamplePublisher('amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600') + example.run() + + +if __name__ == '__main__': + main() diff -Nru python-pika-0.10.0/examples/direct_reply_to.py python-pika-0.11.0/examples/direct_reply_to.py --- python-pika-0.10.0/examples/direct_reply_to.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/examples/direct_reply_to.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- + +""" +This example demonstrates RabbitMQ's "Direct reply-to" usage via +`pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html +for more info about this feature. +""" + +import pika + + +SERVER_QUEUE = 'rpc.server.queue' + + +def main(): + """ Here, Client sends "Marco" to RPC Server, and RPC Server replies with + "Polo". + + NOTE Normally, the server would be running separately from the client, but + in this very simple example both are running in the same thread and sharing + connection and channel. 
+ + """ + with pika.BlockingConnection() as conn: + channel = conn.channel() + + # Set up server + + channel.queue_declare(queue=SERVER_QUEUE, + exclusive=True, + auto_delete=True) + channel.basic_consume(on_server_rx_rpc_request, queue=SERVER_QUEUE) + + + # Set up client + + # NOTE Client must create its consumer and publish RPC requests on the + # same channel to enable the RabbitMQ broker to make the necessary + # associations. + # + # Also, client must create the consumer *before* starting to publish the + # RPC requests. + # + # Client must create its consumer with no_ack=True, because the reply-to + # queue isn't real. + + channel.basic_consume(on_client_rx_reply_from_server, + queue='amq.rabbitmq.reply-to', + no_ack=True) + channel.basic_publish( + exchange='', + routing_key=SERVER_QUEUE, + body='Marco', + properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to')) + + channel.start_consuming() + + +def on_server_rx_rpc_request(ch, method_frame, properties, body): + print 'RPC Server got request:', body + + ch.basic_publish('', routing_key=properties.reply_to, body='Polo') + + ch.basic_ack(delivery_tag=method_frame.delivery_tag) + + print 'RPC Server says good bye' + + +def on_client_rx_reply_from_server(ch, method_frame, properties, body): + print 'RPC Client got reply:', body + + # NOTE A real client might want to make additional RPC requests, but in this + # simple example we're closing the channel after getting our first reply + # to force control to return from channel.start_consuming() + print 'RPC Client says bye' + ch.close() + + +if __name__ == '__main__': + main() diff -Nru python-pika-0.10.0/examples/heatbeat_and_blocked_timeouts.py python-pika-0.11.0/examples/heatbeat_and_blocked_timeouts.py --- python-pika-0.10.0/examples/heatbeat_and_blocked_timeouts.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/examples/heatbeat_and_blocked_timeouts.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,48 @@ +""" +This example demonstrates 
explicit setting of heartbeat and blocked connection +timeouts. + +Starting with RabbitMQ 3.5.5, the broker's default heartbeat timeout decreased +from 580 seconds to 60 seconds. As a result, applications that perform lengthy +processing in the same thread that also runs their Pika connection may +experience unexpected dropped connections due to heartbeat timeout. Here, we +specify an explicit lower bound for heartbeat timeout. + +When RabbitMQ broker is running out of certain resources, such as memory and +disk space, it may block connections that are performing resource-consuming +operations, such as publishing messages. Once a connection is blocked, RabbitMQ +stops reading from that connection's socket, so no commands from the client will +get through to the broker on that connection until the broker unblocks it. A +blocked connection may last for an indefinite period of time, stalling the +connection and possibly resulting in a hang (e.g., in BlockingConnection) until +the connection is unblocked. Blocked Connection Timeout is intended to interrupt +(i.e., drop) a connection that has been blocked longer than the given timeout +value. +""" + + +import pika + + +def main(): + + # NOTE: These parameters work with all Pika connection types + params = pika.ConnectionParameters(heartbeat_interval=600, + blocked_connection_timeout=300) + + conn = pika.BlockingConnection(params) + + chan = conn.channel() + + chan.basic_publish('', 'my-alphabet-queue', "abc") + + # If publish causes the connection to become blocked, then this conn.close() + # would hang until the connection is unblocked, if ever. 
However, the + # blocked_connection_timeout connection parameter would interrupt the wait, + # resulting in ConnectionClosed exception from BlockingConnection (or the + # on_connection_closed callback call in an asynchronous adapter) + conn.close() + + +if __name__ == '__main__': + main() diff -Nru python-pika-0.10.0/examples/send.py python-pika-0.11.0/examples/send.py --- python-pika-0.10.0/examples/send.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/examples/send.py 2017-08-29 16:54:39.000000000 +0000 @@ -28,14 +28,14 @@ properties=pika.BasicProperties(content_type='text/plain', app_id='test', delivery_mode=1)): - print 'Delivery not confirmed' + print('Delivery not confirmed') else: - print 'Confirmed delivery' + print('Confirmed delivery') channel.close() connection.close() duration = time.time() - start_time -print "Published %i messages in %.4f seconds (%.2f messages per second)" % (ITERATIONS, duration, (ITERATIONS/duration)) +print("Published %i messages in %.4f seconds (%.2f messages per second)" % (ITERATIONS, duration, (ITERATIONS/duration))) """ diff -Nru python-pika-0.10.0/examples/tmp.py python-pika-0.11.0/examples/tmp.py --- python-pika-0.10.0/examples/tmp.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/examples/tmp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,363 +0,0 @@ -# -*- coding: utf-8 -*- -import logging -import pika -import json - -LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' - '-35s %(lineno) -5d: %(message)s') -LOGGER = logging.getLogger(__name__) - - -class ExamplePublisher(object): - """This is an example publisher that will handle unexpected interactions - with RabbitMQ such as channel and connection closures. - - If RabbitMQ closes the connection, it will reopen it. You should - look at the output, as there are limited reasons why the connection may - be closed, which usually are tied to permission related issues or - socket timeouts. 
- - It uses delivery confirmations and illustrates one way to keep track of - messages that have been sent and if they've been confirmed by RabbitMQ. - - """ - EXCHANGE = 'message' - EXCHANGE_TYPE = 'topic' - PUBLISH_INTERVAL = 1 - QUEUE = 'text' - ROUTING_KEY = 'example.text' - URLS = ['amqp://test:test@localhost:5672/%2F', - 'amqp://guest:guest@localhost:5672/%2F'] - - def __init__(self): - """Setup the example publisher object, passing in the URL we will use - to connect to RabbitMQ. - - """ - self._connection = None - self._channel = None - self._deliveries = [] - self._acked = 0 - self._nacked = 0 - self._message_number = 0 - self._stopping = False - self._closing = False - self._url_offset = 0 - - def connect(self): - """This method connects to RabbitMQ, returning the connection handle. - When the connection is established, the on_connection_open method - will be invoked by pika. - - :rtype: pika.SelectConnection - - """ - url = self.URLS[self._url_offset] - self._url_offset += 1 - if self._url_offset == len(self.URLS): - self._url_offset = 0 - LOGGER.info('Connecting to %s', url) - return pika.SelectConnection(pika.URLParameters(url), - self.on_connection_open, - False) - - def close_connection(self): - """This method closes the connection to RabbitMQ.""" - LOGGER.info('Closing connection') - self._closing = True - self._connection.close() - - def add_on_connection_close_callback(self): - """This method adds an on close callback that will be invoked by pika - when RabbitMQ closes the connection to the publisher unexpectedly. - - """ - LOGGER.info('Adding connection close callback') - self._connection.add_on_close_callback(self.on_connection_closed) - - def on_connection_closed(self, connection, reply_code, reply_text): - """This method is invoked by pika when the connection to RabbitMQ is - closed unexpectedly. Since it is unexpected, we will reconnect to - RabbitMQ if it disconnects. 
- - :param pika.connection.Connection connection: The closed connection obj - :param int reply_code: The server provided reply_code if given - :param str reply_text: The server provided reply_text if given - - """ - self._channel = None - if self._closing: - self._connection.ioloop.stop() - else: - LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', - reply_code, reply_text) - self._connection.add_timeout(5, self.reconnect) - - def on_connection_open(self, unused_connection): - """This method is called by pika once the connection to RabbitMQ has - been established. It passes the handle to the connection object in - case we need it, but in this case, we'll just mark it unused. - - :type unused_connection: pika.SelectConnection - - """ - LOGGER.info('Connection opened') - self.add_on_connection_close_callback() - self.open_channel() - - def reconnect(self): - """Will be invoked by the IOLoop timer if the connection is - closed. See the on_connection_closed method. - - """ - # This is the old connection IOLoop instance, stop its ioloop - self._connection.ioloop.stop() - - # Create a new connection - self._connection = self.connect() - - # There is now a new connection, needs a new ioloop to run - self._connection.ioloop.start() - - def add_on_channel_close_callback(self): - """This method tells pika to call the on_channel_closed method if - RabbitMQ unexpectedly closes the channel. - - """ - LOGGER.info('Adding channel close callback') - self._channel.add_on_close_callback(self.on_channel_closed) - - def on_channel_closed(self, channel, reply_code, reply_text): - """Invoked by pika when RabbitMQ unexpectedly closes the channel. - Channels are usually closed if you attempt to do something that - violates the protocol, such as re-declare an exchange or queue with - different parameters. In this case, we'll close the connection - to shutdown the object. 
- - :param pika.channel.Channel: The closed channel - :param int reply_code: The numeric reason the channel was closed - :param str reply_text: The text reason the channel was closed - - """ - LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) - self._deliveries = [] - self._message_number = 0 - if not self._closing: - self._connection.close() - - def on_channel_open(self, channel): - """This method is invoked by pika when the channel has been opened. - The channel object is passed in so we can make use of it. - - Since the channel is now open, we'll declare the exchange to use. - - :param pika.channel.Channel channel: The channel object - - """ - LOGGER.info('Channel opened') - self._channel = channel - self.add_on_channel_close_callback() - self.setup_exchange(self.EXCHANGE) - - def setup_exchange(self, exchange_name): - """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC - command. When it is complete, the on_exchange_declareok method will - be invoked by pika. - - :param str|unicode exchange_name: The name of the exchange to declare - - """ - LOGGER.info('Declaring exchange %s', exchange_name) - self._channel.exchange_declare(self.on_exchange_declareok, - exchange_name, - self.EXCHANGE_TYPE) - - def on_exchange_declareok(self, unused_frame): - """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC - command. - - :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame - - """ - LOGGER.info('Exchange declared') - self.setup_queue(self.QUEUE) - - def setup_queue(self, queue_name): - """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC - command. When it is complete, the on_queue_declareok method will - be invoked by pika. - - :param str|unicode queue_name: The name of the queue to declare. 
- - """ - LOGGER.info('Declaring queue %s', queue_name) - self._channel.queue_declare(self.on_queue_declareok, queue_name) - - def on_queue_declareok(self, method_frame): - """Method invoked by pika when the Queue.Declare RPC call made in - setup_queue has completed. In this method we will bind the queue - and exchange together with the routing key by issuing the Queue.Bind - RPC command. When this command is complete, the on_bindok method will - be invoked by pika. - - :param pika.frame.Method method_frame: The Queue.DeclareOk frame - - """ - LOGGER.info('Binding %s to %s with %s', - self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) - self._channel.queue_bind(self.on_bindok, self.QUEUE, - self.EXCHANGE, self.ROUTING_KEY) - - def on_delivery_confirmation(self, method_frame): - """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC - command, passing in either a Basic.Ack or Basic.Nack frame with - the delivery tag of the message that was published. The delivery tag - is an integer counter indicating the message number that was sent - on the channel via Basic.Publish. Here we're just doing house keeping - to keep track of stats and remove message numbers that we expect - a delivery confirmation of from the list used to keep track of messages - that are pending confirmation. 
- - :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame - - """ - confirmation_type = method_frame.method.NAME.split('.')[1].lower() - LOGGER.info('Received %s for delivery tag: %i', - confirmation_type, - method_frame.method.delivery_tag) - if confirmation_type == 'ack': - self._acked += 1 - elif confirmation_type == 'nack': - self._nacked += 1 - self._deliveries.remove(method_frame.method.delivery_tag) - LOGGER.info('Published %i messages, %i have yet to be confirmed, ' - '%i were acked and %i were nacked', - self._message_number, len(self._deliveries), - self._acked, self._nacked) - - def enable_delivery_confirmations(self): - """Send the Confirm.Select RPC method to RabbitMQ to enable delivery - confirmations on the channel. The only way to turn this off is to close - the channel and create a new one. - - When the message is confirmed from RabbitMQ, the - on_delivery_confirmation method will be invoked passing in a Basic.Ack - or Basic.Nack method from RabbitMQ that will indicate which messages it - is confirming or rejecting. - - """ - LOGGER.info('Issuing Confirm.Select RPC command') - self._channel.confirm_delivery(self.on_delivery_confirmation) - - def publish_message(self): - """If the class is not stopping, publish a message to RabbitMQ, - appending a list of deliveries with the message number that was sent. - This list will be used to check for delivery confirmations in the - on_delivery_confirmations method. - - Once the message has been sent, schedule another message to be sent. - The main reason I put scheduling in was just so you can get a good idea - of how the process is flowing by slowing down and speeding up the - delivery intervals by changing the PUBLISH_INTERVAL constant in the - class. 
- - """ - if self._stopping: - return - - message = {u'مفتاح': u' قيمة', - u'键': u'值', - u'キー': u'値'} - properties = pika.BasicProperties(app_id='example-publisher', - content_type='text/plain', - headers=message) - - self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, - json.dumps(message, ensure_ascii=False), - properties) - self._message_number += 1 - self._deliveries.append(self._message_number) - LOGGER.info('Published message # %i', self._message_number) - self.schedule_next_message() - - def schedule_next_message(self): - """If we are not closing our connection to RabbitMQ, schedule another - message to be delivered in PUBLISH_INTERVAL seconds. - - """ - if self._stopping: - return - LOGGER.info('Scheduling next message for %0.1f seconds', - self.PUBLISH_INTERVAL) - self._connection.add_timeout(self.PUBLISH_INTERVAL, - self.publish_message) - - def start_publishing(self): - """This method will enable delivery confirmations and schedule the - first message to be sent to RabbitMQ - - """ - LOGGER.info('Issuing consumer related RPC commands') - self.enable_delivery_confirmations() - self.schedule_next_message() - - def on_bindok(self, unused_frame): - """This method is invoked by pika when it receives the Queue.BindOk - response from RabbitMQ. Since we know we're now setup and bound, it's - time to start publishing.""" - LOGGER.info('Queue bound') - self.start_publishing() - - def close_channel(self): - """Invoke this command to close the channel with RabbitMQ by sending - the Channel.Close RPC command. - - """ - LOGGER.info('Closing the channel') - if self._channel: - self._channel.close() - - def open_channel(self): - """This method will open a new channel with RabbitMQ by issuing the - Channel.Open RPC command. When RabbitMQ confirms the channel is open - by sending the Channel.OpenOK RPC reply, the on_channel_open method - will be invoked. 
- - """ - LOGGER.info('Creating a new channel') - self._connection.channel(on_open_callback=self.on_channel_open) - - def run(self): - """Run the example code by connecting and then starting the IOLoop. - - """ - self._connection = self.connect() - self._connection.ioloop.start() - - def stop(self): - """Stop the example by closing the channel and connection. We - set a flag here so that we stop scheduling new messages to be - published. The IOLoop is started because this method is - invoked by the Try/Catch below when KeyboardInterrupt is caught. - Starting the IOLoop again will allow the publisher to cleanly - disconnect from RabbitMQ. - - """ - LOGGER.info('Stopping') - self._stopping = True - self.close_channel() - self.close_connection() - self._connection.ioloop.start() - LOGGER.info('Stopped') - -def main(): - logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) - - example = ExamplePublisher() - try: - example.run() - except KeyboardInterrupt: - example.stop() - -if __name__ == '__main__': - main() diff -Nru python-pika-0.10.0/examples/twisted_service.py python-pika-0.11.0/examples/twisted_service.py --- python-pika-0.10.0/examples/twisted_service.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/examples/twisted_service.py 2017-08-29 16:54:39.000000000 +0000 @@ -6,7 +6,7 @@ # - Post by Brian Chandler # https://groups.google.com/forum/#!topic/pika-python/o_deVmGondk # - Pika Documentation -# http://pika.readthedocs.org/en/latest/examples/twisted_example.html +# https://pika.readthedocs.io/en/latest/examples/twisted_example.html Fire up this test application via `twistd -ny twisted_service.py` diff -Nru python-pika-0.10.0/.gitignore python-pika-0.11.0/.gitignore --- python-pika-0.10.0/.gitignore 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/.gitignore 2017-08-29 16:54:39.000000000 +0000 @@ -4,6 +4,7 @@ .coverage .tox .DS_Store +.python-version pika.iml codegen pika.egg-info @@ -14,3 +15,4 @@ dist docs/_build *.conf.in +venvs/ 
diff -Nru python-pika-0.10.0/LICENSE python-pika-0.11.0/LICENSE --- python-pika-0.10.0/LICENSE 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/LICENSE 2017-08-29 16:54:39.000000000 +0000 @@ -1,4 +1,4 @@ -Copyright (c) 2009-2015, Tony Garnock-Jones, Gavin M. Roy, Pivotal and others. +Copyright (c) 2009-2017, Tony Garnock-Jones, Gavin M. Roy, Pivotal and others. All rights reserved. Redistribution and use in source and binary forms, with or without modification, diff -Nru python-pika-0.10.0/nose.cfg python-pika-0.11.0/nose.cfg --- python-pika-0.10.0/nose.cfg 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/nose.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[nosetests] -verbosity=3 -tests=tests/unit,tests/acceptance diff -Nru python-pika-0.10.0/pika/adapters/asyncio_connection.py python-pika-0.11.0/pika/adapters/asyncio_connection.py --- python-pika-0.10.0/pika/adapters/asyncio_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/asyncio_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,221 @@ +"""Use pika with the Asyncio EventLoop""" +import asyncio +from functools import partial + +from pika.adapters import base_connection + + +class IOLoopAdapter: + def __init__(self, loop): + """ + Basic adapter for asyncio event loop + + :type loop: asyncio.AbstractEventLoop + :param loop: Asyncio Loop + + """ + self.loop = loop + + self.handlers = {} + self.readers = set() + self.writers = set() + + def add_timeout(self, deadline, callback_method): + """Add the callback_method to the EventLoop timer to fire after deadline + seconds. Returns a Handle to the timeout. 
+ + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: asyncio.Handle + + """ + return self.loop.call_later(deadline, callback_method) + + @staticmethod + def remove_timeout(handle): + """ + Cancel asyncio.Handle + + :type handle: asyncio.Handle + :rtype: bool + """ + return handle.cancel() + + def add_handler(self, fd, cb, event_state): + """ Registers the given handler to receive the given events for ``fd``. + + The ``fd`` argument is an integer file descriptor. + + The ``event_state`` argument is a bitwise or of the constants + ``base_connection.BaseConnection.READ``, ``base_connection.BaseConnection.WRITE``, + and ``base_connection.BaseConnection.ERROR``. + + """ + + if fd in self.handlers: + raise ValueError("fd {} added twice".format(fd)) + self.handlers[fd] = cb + + if event_state & base_connection.BaseConnection.READ: + self.loop.add_reader( + fd, + partial( + cb, + fd=fd, + events=base_connection.BaseConnection.READ + ) + ) + self.readers.add(fd) + + if event_state & base_connection.BaseConnection.WRITE: + self.loop.add_writer( + fd, + partial( + cb, + fd=fd, + events=base_connection.BaseConnection.WRITE + ) + ) + self.writers.add(fd) + + def remove_handler(self, fd): + """ Stop listening for events on ``fd``. 
""" + + if fd not in self.handlers: + return + + if fd in self.readers: + self.loop.remove_reader(fd) + self.readers.remove(fd) + + if fd in self.writers: + self.loop.remove_writer(fd) + self.writers.remove(fd) + + del self.handlers[fd] + + def update_handler(self, fd, event_state): + if event_state & base_connection.BaseConnection.READ: + if fd not in self.readers: + self.loop.add_reader( + fd, + partial( + self.handlers[fd], + fd=fd, + events=base_connection.BaseConnection.READ + ) + ) + self.readers.add(fd) + else: + if fd in self.readers: + self.loop.remove_reader(fd) + self.readers.remove(fd) + + if event_state & base_connection.BaseConnection.WRITE: + if fd not in self.writers: + self.loop.add_writer( + fd, + partial( + self.handlers[fd], + fd=fd, + events=base_connection.BaseConnection.WRITE + ) + ) + self.writers.add(fd) + else: + if fd in self.writers: + self.loop.remove_writer(fd) + self.writers.remove(fd) + + + def start(self): + """ Start Event Loop """ + if self.loop.is_running(): + return + + self.loop.run_forever() + + def stop(self): + """ Stop Event Loop """ + if self.loop.is_closed(): + return + + self.loop.stop() + + +class AsyncioConnection(base_connection.BaseConnection): + """ The AsyncioConnection runs on the Asyncio EventLoop. 
+ + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param on_open_error_callback: Method to call if the connection cant be opened + :type on_open_error_callback: method + :param asyncio.AbstractEventLoop loop: By default asyncio.get_event_loop() + + """ + def __init__(self, + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + stop_ioloop_on_close=False, + custom_ioloop=None): + """ Create a new instance of the AsyncioConnection class, connecting + to RabbitMQ automatically + + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param on_open_error_callback: Method to call if the connection cant be opened + :type on_open_error_callback: method + :param asyncio.AbstractEventLoop loop: By default asyncio.get_event_loop() + + """ + self.sleep_counter = 0 + self.loop = custom_ioloop or asyncio.get_event_loop() + self.ioloop = IOLoopAdapter(self.loop) + + super().__init__( + parameters, on_open_callback, + on_open_error_callback, + on_close_callback, self.ioloop, + stop_ioloop_on_close=stop_ioloop_on_close, + ) + + def _adapter_connect(self): + """Connect to the remote socket, adding the socket to the EventLoop if + connected. 
+ + :rtype: bool + + """ + error = super()._adapter_connect() + + if not error: + self.ioloop.add_handler( + self.socket.fileno(), + self._handle_events, + self.event_state, + ) + + return error + + def _adapter_disconnect(self): + """Disconnect from the RabbitMQ broker""" + + if self.socket: + self.ioloop.remove_handler( + self.socket.fileno() + ) + + super()._adapter_disconnect() + + def _handle_disconnect(self): + # No other way to handle exceptions.ProbableAuthenticationError + try: + super()._handle_disconnect() + super()._handle_write() + except Exception as e: + # FIXME: Pass None or other constant instead "-1" + self._on_disconnect(-1, e) diff -Nru python-pika-0.10.0/pika/adapters/base_connection.py python-pika-0.11.0/pika/adapters/base_connection.py --- python-pika-0.10.0/pika/adapters/base_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/base_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -10,7 +10,6 @@ import pika.compat from pika import connection -from pika import exceptions try: SOL_TCP = socket.SOL_TCP @@ -36,7 +35,8 @@ WRITE = 0x0004 ERROR = 0x0008 - ERRORS_TO_ABORT = [errno.EBADF, errno.ECONNABORTED, errno.EPIPE] + ERRORS_TO_ABORT = [errno.EBADF, errno.ECONNABORTED, errno.EPIPE, + errno.ETIMEDOUT] ERRORS_TO_IGNORE = [errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR] DO_HANDSHAKE = True WARN_ABOUT_IOLOOP = False @@ -52,10 +52,10 @@ :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Method to call on connection open - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method - :param method on_close_callback: Method to call on connection close + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, 
reason_text) :param object ioloop: IOLoop object to use :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :raises: RuntimeError @@ -79,6 +79,35 @@ on_open_error_callback, on_close_callback) + def __repr__(self): + def get_socket_repr(sock): + """Return socket info suitable for use in repr""" + if sock is None: + return None + + sockname = None + peername = None + try: + sockname = sock.getsockname() + except socket.error: + # closed? + pass + else: + try: + peername = sock.getpeername() + except socket.error: + # not connected? + pass + + return '%s->%s' % (sockname, peername) + + return ( + '<%s %s socket=%s params=%s>' % + (self.__class__.__name__, + self._STATE_NAMES[self.connection_state], + get_socket_repr(self.socket), + self.params)) + def add_timeout(self, deadline, callback_method): """Add the callback_method to the IOLoop timer to fire after deadline seconds. Returns a handle to the timeout @@ -100,8 +129,11 @@ :param str reply_text: The text reason for the close """ - super(BaseConnection, self).close(reply_code, reply_text) - self._handle_ioloop_stop() + try: + super(BaseConnection, self).close(reply_code, reply_text) + finally: + if self.is_closed: + self._handle_ioloop_stop() def remove_timeout(self, timeout_id): """Remove the timeout from the IOLoop by the ID returned from @@ -121,9 +153,10 @@ # Get the addresses for the socket, supporting IPv4 & IPv6 while True: try: - addresses = socket.getaddrinfo(self.params.host, self.params.port, - 0, socket.SOCK_STREAM, - socket.IPPROTO_TCP) + addresses = self._getaddrinfo(self.params.host, + self.params.port, + 0, socket.SOCK_STREAM, + socket.IPPROTO_TCP) break except _SOCKET_ERROR as error: if error.errno == errno.EINTR: @@ -149,38 +182,9 @@ def _adapter_disconnect(self): """Invoked if the connection is being told to disconnect""" try: - self._remove_heartbeat() self._cleanup_socket() - self._check_state_on_disconnect() finally: - # Ensure proper cleanup since _check_state_on_disconnect 
may raise - # an exception self._handle_ioloop_stop() - self._init_connection_state() - - def _check_state_on_disconnect(self): - """Checks to see if we were in opening a connection with RabbitMQ when - we were disconnected and raises exceptions for the anticipated - exception types. - - """ - if self.connection_state == self.CONNECTION_PROTOCOL: - LOGGER.error('Incompatible Protocol Versions') - raise exceptions.IncompatibleProtocolError - elif self.connection_state == self.CONNECTION_START: - LOGGER.error("Socket closed while authenticating indicating a " - "probable authentication error") - raise exceptions.ProbableAuthenticationError - elif self.connection_state == self.CONNECTION_TUNE: - LOGGER.error("Socket closed while tuning the connection indicating " - "a probable permission error when accessing a virtual " - "host") - raise exceptions.ProbableAccessDeniedError - elif self.is_open: - LOGGER.warning("Socket closed when connection was open") - elif not self.is_closed and not self.is_closing: - LOGGER.warning('Unknown state on disconnect: %i', - self.connection_state) def _cleanup_socket(self): """Close the socket cleanly""" @@ -197,7 +201,8 @@ :returns: error string on failure; None on success """ - self.socket = socket.socket(sock_addr_tuple[0], socket.SOCK_STREAM, 0) + self.socket = self._create_tcp_connection_socket( + sock_addr_tuple[0], sock_addr_tuple[1], sock_addr_tuple[2]) self.socket.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) self.socket.settimeout(self.params.socket_timeout) @@ -240,6 +245,18 @@ # Made it this far return None + @staticmethod + def _create_tcp_connection_socket(sock_family, sock_type, sock_proto): + """ Create TCP/IP stream socket for AMQP connection + + :param int sock_family: socket family + :param int sock_type: socket type + :param int sock_proto: socket protocol number + + NOTE We break this out to make it easier to patch in mock tests + """ + return socket.socket(sock_family, sock_type, sock_proto) + def 
_do_ssl_handshake(self): """Perform SSL handshaking, copied from python stdlib test_ssl.py. @@ -250,7 +267,10 @@ try: self.socket.do_handshake() break + # TODO should be using SSLWantReadError, etc. directly except ssl.SSLError as err: + # TODO these exc are for non-blocking sockets, but ours isn't + # at this stage, so it's not clear why we have this. if err.args[0] == ssl.SSL_ERROR_WANT_READ: self.event_state = self.READ elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: @@ -260,6 +280,12 @@ self._manage_event_state() @staticmethod + def _getaddrinfo(host, port, family, socktype, proto): + """Wrap `socket.getaddrinfo` to make it easier to patch for unit tests + """ + return socket.getaddrinfo(host, port, family, socktype, proto) + + @staticmethod def _get_error_code(error_value): """Get the error code from the error_value accounting for Python version differences. @@ -269,25 +295,28 @@ """ if not error_value: return None + if hasattr(error_value, 'errno'): # Python >= 2.6 return error_value.errno - elif error_value is not None: + else: + # TODO this doesn't look right; error_value.args[0] ??? Could + # probably remove this code path since pika doesn't test against + # Python 2.5 return error_value[0] # Python <= 2.5 - return None def _flush_outbound(self): - """write early, if the socket will take the data why not get it out - there asap. + """Have the state manager schedule the necessary I/O. """ - self._handle_write() + # NOTE: We don't call _handle_write() from this context, because pika + # code was not designed to be writing to (or reading from) the socket + # from any methods, except from ioloop handler callbacks. Many methods + # in pika core and adapters do not deal gracefully with connection + # errors occurring in their context; e.g., Connection.channel (pika + # issue #659), Connection._on_connection_tune (if connection loss is + # detected in _send_connection_tune_ok, before _send_connection_open is + # called), etc., etc., etc. 
self._manage_event_state() - def _handle_disconnect(self): - """Called internally when the socket is disconnected already - """ - self._adapter_disconnect() - self._on_connection_closed(None, True) - def _handle_ioloop_stop(self): """Invoked when the connection is closed to determine if the IOLoop should be stopped or not. @@ -305,9 +334,10 @@ :param int|object error_value: The inbound error """ - if 'timed out' in str(error_value): - raise socket.timeout + # TODO doesn't seem right: docstring defines error_value as int|object, + # but _get_error_code expects a falsie or an exception-like object error_code = self._get_error_code(error_value) + if not error_code: LOGGER.critical("Tried to handle an error where no error existed") return @@ -324,6 +354,8 @@ elif self.params.ssl and isinstance(error_value, ssl.SSLError): if error_value.args[0] == ssl.SSL_ERROR_WANT_READ: + # TODO doesn't seem right: this logic updates event state, but + # the logic at the bottom unconditionaly disconnects anyway. self.event_state = self.READ elif error_value.args[0] == ssl.SSL_ERROR_WANT_WRITE: self.event_state = self.WRITE @@ -335,20 +367,22 @@ LOGGER.error("Socket Error: %s", error_code) # Disconnect from our IOLoop and let Connection know what's up - self._handle_disconnect() + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + repr(error_value)) def _handle_timeout(self): """Handle a socket timeout in read or write. We don't do anything in the non-blocking handlers because we only have the socket in a blocking state during connect.""" - pass + LOGGER.warning("Unexpected socket timeout") def _handle_events(self, fd, events, error=None, write_only=False): """Handle IO/Event loop events, processing them. 
:param int fd: The file descriptor for the events :param int events: Events from the IO/Event loop - :param int error: Was an error specified + :param int error: Was an error specified; TODO none of the current + adapters appear to be able to pass the `error` arg - is it needed? :param bool write_only: Only handle write events """ @@ -364,10 +398,12 @@ self._handle_read() if (self.socket and write_only and (events & self.READ) and - (events & self.ERROR)): - LOGGER.error('BAD libc: Write-Only but Read+Error. ' + (events & self.ERROR)): + error_msg = ('BAD libc: Write-Only but Read+Error. ' 'Assume socket disconnected.') - self._handle_disconnect() + LOGGER.error(error_msg) + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + error_msg) if self.socket and (events & self.ERROR): LOGGER.error('Error event %r, %r', events, error) @@ -409,7 +445,9 @@ # Empty data, should disconnect if not data or data == 0: LOGGER.error('Read empty data, calling disconnect') - return self._handle_disconnect() + return self._on_terminate( + connection.InternalCloseReasons.SOCKET_ERROR, + "EOF") # Pass the data into our top level frame dispatching method self._on_data_available(data) @@ -418,13 +456,13 @@ def _handle_write(self): """Try and write as much as we can, if we get blocked requeue what's left""" - bytes_written = 0 + total_bytes_sent = 0 try: while self.outbound_buffer: frame = self.outbound_buffer.popleft() while True: try: - bw = self.socket.send(frame) + num_bytes_sent = self.socket.send(frame) break except _SOCKET_ERROR as error: if error.errno == errno.EINTR: @@ -432,10 +470,10 @@ else: raise - bytes_written += bw - if bw < len(frame): + total_bytes_sent += num_bytes_sent + if num_bytes_sent < len(frame): LOGGER.debug("Partial write, requeing remaining data") - self.outbound_buffer.appendleft(frame[bw:]) + self.outbound_buffer.appendleft(frame[num_bytes_sent:]) break except socket.timeout: @@ -451,7 +489,7 @@ else: return self._handle_error(error) - return 
bytes_written + return total_bytes_sent def _init_connection_state(self): @@ -486,6 +524,7 @@ :rtype: ssl.SSLSocket """ + ssl_options = self.params.ssl_options or {} return ssl.wrap_socket(sock, do_handshake_on_connect=self.DO_HANDSHAKE, - **self.params.ssl_options) + **ssl_options) diff -Nru python-pika-0.10.0/pika/adapters/blocking_connection.py python-pika-0.11.0/pika/adapters/blocking_connection.py --- python-pika-0.10.0/pika/adapters/blocking_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/blocking_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -27,6 +27,7 @@ # NOTE: import SelectConnection after others to avoid circular depenency from pika.adapters.select_connection import SelectConnection + LOGGER = logging.getLogger(__name__) @@ -85,7 +86,7 @@ """True if the object is in a signaled state""" return self._ready - def signal_once(self, *_args, **_kwargs): # pylint: disable=W0613 + def signal_once(self, *_args, **_kwargs): """ Set as ready :raises AssertionError: if result was already signalled @@ -94,7 +95,7 @@ self._ready = True def set_value_once(self, *args, **kwargs): - """ Set as ready with value; the value may be retrived via the `value` + """ Set as ready with value; the value may be retrieved via the `value` property getter :raises AssertionError: if result was already set @@ -161,7 +162,7 @@ return self._values -class _IoloopTimerContext(object): # pylint: disable=R0903 +class _IoloopTimerContext(object): """Context manager for registering and safely unregistering a SelectConnection ioloop-based timer """ @@ -196,7 +197,7 @@ return self._callback_result.is_ready() -class _TimerEvt(object): # pylint: disable=R0903 +class _TimerEvt(object): """Represents a timer created via `BlockingConnection.add_timeout`""" __slots__ = ('timer_id', '_callback') @@ -211,7 +212,7 @@ self.timer_id = None def __repr__(self): - return '%s(timer_id=%s, callback=%s)' % (self.__class__.__name__, + return '<%s timer_id=%s 
callback=%s>' % (self.__class__.__name__, self.timer_id, self._callback) def dispatch(self): @@ -219,7 +220,7 @@ self._callback() -class _ConnectionBlockedUnblockedEvtBase(object): # pylint: disable=R0903 +class _ConnectionBlockedUnblockedEvtBase(object): """Base class for `_ConnectionBlockedEvt` and `_ConnectionUnblockedEvt`""" __slots__ = ('_callback', '_method_frame') @@ -235,28 +236,26 @@ self._method_frame = method_frame def __repr__(self): - return '%s(callback=%s, frame=%s)' % (self.__class__.__name__, - self._callback, - self._method_frame) + return '<%s callback=%s, frame=%s>' % (self.__class__.__name__, + self._callback, + self._method_frame) def dispatch(self): """Dispatch the user's callback method""" self._callback(self._method_frame) -class _ConnectionBlockedEvt( # pylint: disable=R0903 - _ConnectionBlockedUnblockedEvtBase): +class _ConnectionBlockedEvt(_ConnectionBlockedUnblockedEvtBase): """Represents a Connection.Blocked notification from RabbitMQ broker`""" pass -class _ConnectionUnblockedEvt( # pylint: disable=R0903 - _ConnectionBlockedUnblockedEvtBase): +class _ConnectionUnblockedEvt(_ConnectionBlockedUnblockedEvtBase): """Represents a Connection.Unblocked notification from RabbitMQ broker`""" pass -class BlockingConnection(object): # pylint: disable=R0902 +class BlockingConnection(object): """The BlockingConnection creates a layer on top of Pika's asynchronous core providing methods that will block until their expected response has returned. Due to the asynchronous nature of the `Basic.Deliver` and @@ -265,7 +264,7 @@ receive messages from RabbitMQ using :meth:`basic_consume ` or if you want to be notified of a delivery failure when using - :meth:`basic_publish ` . + :meth:`basic_publish `. For more information about communicating with the blocking_connection adapter, be sure to check out the @@ -273,6 +272,40 @@ :class:`Channel ` based communication for the blocking_connection adapter. 
+ To prevent recursion/reentrancy, the blocking connection and channel + implementations queue asynchronously-delivered events received + in nested context (e.g., while waiting for `BlockingConnection.channel` or + `BlockingChannel.queue_declare` to complete), dispatching them synchronously + once nesting returns to the desired context. This concerns all callbacks, + such as those registered via `BlockingConnection.add_timeout`, + `BlockingConnection.add_on_connection_blocked_callback`, + `BlockingConnection.add_on_connection_unblocked_callback`, + `BlockingChannel.basic_consume`, etc. + + Blocked Connection deadlock avoidance: when RabbitMQ becomes low on + resources, it emits Connection.Blocked (AMQP extension) to the client + connection when client makes a resource-consuming request on that connection + or its channel (e.g., `Basic.Publish`); subsequently, RabbitMQ suspsends + processing requests from that connection until the affected resources are + restored. See http://www.rabbitmq.com/connection-blocked.html. This + may impact `BlockingConnection` and `BlockingChannel` operations in a + way that users might not be expecting. For example, if the user dispatches + `BlockingChannel.basic_publish` in non-publisher-confirmation mode while + RabbitMQ is in this low-resource state followed by a synchronous request + (e.g., `BlockingConnection.channel`, `BlockingChannel.consume`, + `BlockingChannel.basic_consume`, etc.), the synchronous request will block + indefinitely (until Connection.Unblocked) waiting for RabbitMQ to reply. If + the blocked state persists for a long time, the blocking operation will + appear to hang. In this state, `BlockingConnection` instance and its + channels will not dispatch user callbacks. SOLUTION: To break this potential + deadlock, applications may configure the `blocked_connection_timeout` + connection parameter when instantiating `BlockingConnection`. 
Upon blocked + connection timeout, this adapter will raise ConnectionClosed exception with + first exception arg of + `pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`. See + `pika.connection.ConnectionParameters` documentation to learn more about + `blocked_connection_timeout` configuration. + """ # Connection-opened callback args _OnOpenedArgs = namedtuple('BlockingConnection__OnOpenedArgs', @@ -280,7 +313,7 @@ # Connection-establishment error callback args _OnOpenErrorArgs = namedtuple('BlockingConnection__OnOpenErrorArgs', - 'connection error_text') + 'connection error') # Connection-closing callback args _OnClosedArgs = namedtuple('BlockingConnection__OnClosedArgs', @@ -336,10 +369,16 @@ on_close_callback=self._closed_result.set_value_once, stop_ioloop_on_close=False) + self._impl.ioloop.activate_poller() + self._process_io_for_connection_setup() + def __repr__(self): + return '<%s impl=%r>' % (self.__class__.__name__, self._impl) + def _cleanup(self): """Clean up members that might inhibit garbage collection""" + self._impl.ioloop.deactivate_poller() self._ready_events.clear() self._opened_result.reset() self._open_error_result.reset() @@ -363,19 +402,25 @@ # __exit__ part self._event_dispatch_suspend_depth -= 1 - def _process_io_for_connection_setup(self): # pylint: disable=C0103 + def _process_io_for_connection_setup(self): """ Perform follow-up processing for connection setup request: flush connection output and process input while waiting for connection-open or connection-error. 
:raises AMQPConnectionError: on connection open error """ - self._flush_output(self._opened_result.is_ready, - self._open_error_result.is_ready) + if not self._open_error_result.ready: + self._flush_output(self._opened_result.is_ready, + self._open_error_result.is_ready) if self._open_error_result.ready: - raise exceptions.AMQPConnectionError( - self._open_error_result.value.error_text) + try: + exception_or_message = self._open_error_result.value.error + if isinstance(exception_or_message, Exception): + raise exception_or_message + raise exceptions.AMQPConnectionError(exception_or_message) + finally: + self._cleanup() assert self._opened_result.ready assert self._opened_result.value.connection is self._impl @@ -391,7 +436,7 @@ returning true when it's time to stop processing. Their results are OR'ed together. """ - if self._impl.is_closed: + if self.is_closed: raise exceptions.ConnectionClosed() # Conditions for terminating the processing loop: @@ -401,31 +446,35 @@ # OR # empty outbound buffer and any waiter is ready is_done = (lambda: - self._closed_result.ready or - (not self._impl.outbound_buffer and - (not waiters or any(ready() for ready in waiters)))) + self._closed_result.ready or + (not self._impl.outbound_buffer and + (not waiters or any(ready() for ready in waiters)))) # Process I/O until our completion condition is satisified while not is_done(): self._impl.ioloop.poll() self._impl.ioloop.process_timeouts() - if self._closed_result.ready: + if self._open_error_result.ready or self._closed_result.ready: try: - result = self._closed_result.value - if result.reason_code not in [0, 200]: - LOGGER.critical('Connection close detected; result=%r', - result) - raise exceptions.ConnectionClosed(result.reason_code, - result.reason_text) - elif not self._user_initiated_close: - # NOTE: unfortunately, upon socket error, on_close_callback - # presently passes reason_code=0, so we don't detect that as - # an error - LOGGER.critical('Connection close detected') - 
raise exceptions.ConnectionClosed() + if not self._user_initiated_close: + if self._open_error_result.ready: + maybe_exception = self._open_error_result.value.error + LOGGER.error('Connection open failed - %r', + maybe_exception) + if isinstance(maybe_exception, Exception): + raise maybe_exception + else: + raise exceptions.ConnectionClosed(maybe_exception) + else: + result = self._closed_result.value + LOGGER.error('Connection close detected; result=%r', + result) + raise exceptions.ConnectionClosed(result.reason_code, + result.reason_text) else: - LOGGER.info('Connection closed; result=%r', result) + LOGGER.info('Connection closed; result=%r', + self._closed_result.value) finally: self._cleanup() @@ -519,16 +568,17 @@ evt.dispatch() - def add_on_connection_blocked_callback(self, # pylint: disable=C0103 - callback_method): + def add_on_connection_blocked_callback(self, callback_method): """Add a callback to be notified when RabbitMQ has sent a `Connection.Blocked` frame indicating that RabbitMQ is low on resources. Publishers can use this to voluntarily suspend publishing, instead of relying on back pressure throttling. The callback will be passed the `Connection.Blocked` method frame. + See also `ConnectionParameters.blocked_connection_timeout`. + :param method callback_method: Callback to call on `Connection.Blocked`, - having the signature callback_method(pika.frame.Method), where the + having the signature `callback_method(pika.frame.Method)`, where the method frame's `method` member is of type `pika.spec.Connection.Blocked` @@ -536,8 +586,7 @@ self._impl.add_on_connection_blocked_callback( functools.partial(self._on_connection_blocked, callback_method)) - def add_on_connection_unblocked_callback(self, # pylint: disable=C0103 - callback_method): + def add_on_connection_unblocked_callback(self, callback_method): """Add a callback to be notified when RabbitMQ has sent a `Connection.Unblocked` frame letting publishers know it's ok to start publishing again. 
The callback will be passed the @@ -545,7 +594,7 @@ :param method callback_method: Callback to call on `Connection.Unblocked`, having the signature - callback_method(pika.frame.Method), where the method frame's + `callback_method(pika.frame.Method)`, where the method frame's `method` member is of type `pika.spec.Connection.Unblocked` """ @@ -613,6 +662,11 @@ :param str reply_text: The text reason for the close """ + if self.is_closed: + LOGGER.debug('Close called on closed connection (%s): %s', + reply_code, reply_text) + return + LOGGER.info('Closing connection (%s): %s', reply_code, reply_text) self._user_initiated_close = True @@ -621,7 +675,12 @@ for impl_channel in pika.compat.dictvalues(self._impl._channels): channel = impl_channel._get_cookie() if channel.is_open: - channel.close(reply_code, reply_text) + try: + channel.close(reply_code, reply_text) + except exceptions.ChannelClosed as exc: + # Log and suppress broker-closed channel + LOGGER.warning('Got ChannelClosed while closing channel ' + 'from connection.close: %r', exc) # Close the connection self._impl.close(reply_code, reply_text) @@ -637,17 +696,18 @@ seconds. The actual blocking time depends on the granularity of the underlying ioloop. Zero means return as soon as possible. None means there is no limit on processing time and the function will block - until I/O produces actionalable events. Defaults to 0 for backward + until I/O produces actionable events. Defaults to 0 for backward compatibility. This parameter is NEW in pika 0.10.0. 
""" - common_terminator = lambda: bool( - self._channels_pending_dispatch or self._ready_events) - - if time_limit is None: - self._flush_output(common_terminator) - else: - with _IoloopTimerContext(time_limit, self._impl) as timer: - self._flush_output(timer.is_ready, common_terminator) + with self._acquire_event_dispatch() as dispatch_acquired: + # Check if we can actually process pending events + common_terminator = lambda: bool(dispatch_acquired and + (self._channels_pending_dispatch or self._ready_events)) + if time_limit is None: + self._flush_output(common_terminator) + else: + with _IoloopTimerContext(time_limit, self._impl) as timer: + self._flush_output(timer.is_ready, common_terminator) if self._ready_events: self._dispatch_connection_events() @@ -681,7 +741,7 @@ specify but it is recommended that you let Pika manage the channel numbers. - :rtype: pika.synchronous_connection.BlockingChannel + :rtype: pika.adapters.blocking_connection.BlockingChannel """ with _CallbackResult(self._OnChannelOpenedArgs) as opened_args: impl_channel = self._impl.channel( @@ -704,7 +764,7 @@ # Prepare `with` context return self - def __exit__(self, tp, value, traceback): + def __exit__(self, exc_type, value, traceback): # Close connection after `with` context self.close() @@ -722,7 +782,8 @@ @property def is_closing(self): """ - Returns a boolean reporting the current connection state. + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. """ return self._impl.is_closing @@ -747,7 +808,7 @@ return self._impl.basic_nack @property - def consumer_cancel_notify_supported(self): # pylint: disable=C0103 + def consumer_cancel_notify_supported(self): """Specifies if the server supports consumer cancel notification on the active connection. 
@@ -757,7 +818,7 @@ return self._impl.consumer_cancel_notify @property - def exchange_exchange_bindings_supported(self): # pylint: disable=C0103 + def exchange_exchange_bindings_supported(self): """Specifies if the active connection supports exchange to exchange bindings. @@ -782,12 +843,12 @@ publisher_confirms = publisher_confirms_supported -class _ChannelPendingEvt(object): # pylint: disable=R0903 +class _ChannelPendingEvt(object): """Base class for BlockingChannel pending events""" pass -class _ConsumerDeliveryEvt(_ChannelPendingEvt): # pylint: disable=R0903 +class _ConsumerDeliveryEvt(_ChannelPendingEvt): """This event represents consumer message delivery `Basic.Deliver`; it contains method, properties, and body of the delivered message. """ @@ -807,7 +868,7 @@ self.body = body -class _ConsumerCancellationEvt(_ChannelPendingEvt): # pylint: disable=R0903 +class _ConsumerCancellationEvt(_ChannelPendingEvt): """This event represents server-initiated consumer cancellation delivered to client via Basic.Cancel. 
After receiving Basic.Cancel, there will be no further deliveries for the consumer identified by `consumer_tag` in @@ -824,8 +885,8 @@ self.method_frame = method_frame def __repr__(self): - return '%s(method_frame=%r)' % (self.__class__.__name__, - self.method_frame) + return '<%s method_frame=%r>' % (self.__class__.__name__, + self.method_frame) @property def method(self): @@ -833,12 +894,12 @@ return self.method_frame.method -class _ReturnedMessageEvt(_ChannelPendingEvt): # pylint: disable=R0903 +class _ReturnedMessageEvt(_ChannelPendingEvt): """This event represents a message returned by broker via `Basic.Return`""" __slots__ = ('callback', 'channel', 'method', 'properties', 'body') - def __init__(self, callback, channel, method, properties, body): # pylint: disable=R0913 + def __init__(self, callback, channel, method, properties, body): """ :param callable callback: user's callback, having the signature callback(channel, method, properties, body), where @@ -859,17 +920,17 @@ self.body = body def __repr__(self): - return ('%s(callback=%r, channel=%r, method=%r, properties=%r, ' - 'body=%.300r') % (self.__class__.__name__, self.callback, - self.channel, self.method, self.properties, - self.body) + return ('<%s callback=%r channel=%r method=%r properties=%r ' + 'body=%.300r>') % (self.__class__.__name__, self.callback, + self.channel, self.method, self.properties, + self.body) def dispatch(self): """Dispatch user's callback""" self.callback(self.channel, self.method, self.properties, self.body) -class ReturnedMessage(object): # pylint: disable=R0903 +class ReturnedMessage(object): """Represents a message returned via Basic.Return in publish-acknowledgments mode """ @@ -950,7 +1011,7 @@ return self.state == self.CANCELLED_BY_BROKER -class _QueueConsumerGeneratorInfo(object): # pylint: disable=R0903 +class _QueueConsumerGeneratorInfo(object): """Container for information about the active queue consumer generator """ __slots__ = ('params', 'consumer_tag', 
'pending_events') @@ -969,11 +1030,11 @@ self.pending_events = deque() def __repr__(self): - return '%s(params=%r, consumer_tag=%r)' % ( + return '<%s params=%r consumer_tag=%r>' % ( self.__class__.__name__, self.params, self.consumer_tag) -class BlockingChannel(object): # pylint: disable=R0904,R0902 +class BlockingChannel(object): """The BlockingChannel implements blocking semantics for most things that one would use callback-passing-style for with the :py:class:`~pika.channel.Channel` class. In addition, @@ -1012,11 +1073,11 @@ # Broker's basic-ack/basic-nack args when delivery confirmation is enabled; # may concern a single or multiple messages - _OnMessageConfirmationReportArgs = namedtuple( # pylint: disable=C0103 + _OnMessageConfirmationReportArgs = namedtuple( 'BlockingChannel__OnMessageConfirmationReportArgs', 'method_frame') - # Parameters for broker-inititated Channel.Close request: reply_code + # Parameters for broker-initiated Channel.Close request: reply_code # holds the broker's non-zero error code and reply_text holds the # corresponding error message text. _OnChannelClosedByBrokerArgs = namedtuple( @@ -1071,7 +1132,7 @@ self._basic_consume_ok_result = _CallbackResult() # Receives the broker-inititated Channel.Close parameters - self._channel_closed_by_broker_result = _CallbackResult( # pylint: disable=C0103 + self._channel_closed_by_broker_result = _CallbackResult( self._OnChannelClosedByBrokerArgs) # Receives args from Basic.GetEmpty response @@ -1098,20 +1159,35 @@ LOGGER.info("Created channel=%s", self.channel_number) - def _cleanup(self): - """Clean up members that might inhibit garbage collection""" - self._message_confirmation_result.reset() - self._pending_events = deque() - self._consumer_infos = dict() - def __int__(self): """Return the channel object as its channel number + NOTE: inherited from legacy BlockingConnection; might be error-prone; + use `channel_number` property instead. 
+ :rtype: int """ return self.channel_number + def __repr__(self): + return '<%s impl=%r>' % (self.__class__.__name__, self._impl) + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + try: + self.close() + except exceptions.ChannelClosed: + pass + + def _cleanup(self): + """Clean up members that might inhibit garbage collection""" + self._message_confirmation_result.reset() + self._pending_events = deque() + self._consumer_infos = dict() + @property def channel_number(self): """Channel number""" @@ -1133,7 +1209,8 @@ @property def is_closing(self): - """Returns True if the channel is closing. + """Returns True if client-initiated closing of the channel is in + progress. :rtype: bool @@ -1163,7 +1240,7 @@ returning true when it's time to stop processing. Their results are OR'ed together. """ - if self._impl.is_closed: + if self.is_closed: raise exceptions.ChannelClosed() if not waiters: @@ -1220,8 +1297,7 @@ self.connection._request_channel_dispatch(self.channel_number) - def _on_consumer_cancelled_by_broker(self, # pylint: disable=C0103 - method_frame): + def _on_consumer_cancelled_by_broker(self, method_frame): """Called by impl when broker cancels consumer via Basic.Cancel. This is a RabbitMQ-specific feature. The circumstances include deletion @@ -1245,8 +1321,7 @@ else: self._add_pending_event(evt) - def _on_consumer_message_delivery(self, channel, # pylint: disable=W0613 - method, properties, body): + def _on_consumer_message_delivery(self, _channel, method, properties, body): """Called by impl when a message is delivered for a consumer :param Channel channel: The implementation channel object @@ -1320,14 +1395,14 @@ evt.dispatch() - def close(self, reply_code=0, reply_text="Normal Shutdown"): + def close(self, reply_code=0, reply_text="Normal shutdown"): """Will invoke a clean shutdown of the channel with the AMQP Broker. 
:param int reply_code: The reply code to close the channel with :param str reply_text: The reply text to close the channel with """ - LOGGER.info('Channel.close(%s, %s)', reply_code, reply_text) + LOGGER.debug('Channel.close(%s, %s)', reply_code, reply_text) # Cancel remaining consumers self._cancel_all_consumers() @@ -1403,7 +1478,7 @@ _ReturnedMessageEvt( callback, self, method, properties, body)))) - def basic_consume(self, # pylint: disable=R0913 + def basic_consume(self, consumer_callback, queue, no_ack=False, @@ -1458,7 +1533,7 @@ arguments=arguments, consumer_callback=consumer_callback) - def _basic_consume_impl(self, # pylint: disable=R0913 + def _basic_consume_impl(self, queue, no_ack, exclusive, @@ -1574,7 +1649,7 @@ consumer_info.state) # Assertion failure here signals disconnect between consumer state - # in BlockingConnection and Connection + # in BlockingChannel and Channel assert (consumer_info.cancelled_by_broker or consumer_tag in self._impl._consumers), consumer_tag @@ -1693,7 +1768,7 @@ else: self._cancel_all_consumers() - def consume(self, queue, no_ack=False, # pylint: disable=R0913 + def consume(self, queue, no_ack=False, exclusive=False, arguments=None, inactivity_timeout=None): """Blocking consumption of a queue instead of via a callback. This @@ -1813,7 +1888,7 @@ def get_waiting_message_count(self): """Returns the number of messages that may be retrieved from the current - queue consumer generator via `BasicChannel.consume` without blocking. + queue consumer generator via `BlockingChannel.consume` without blocking. NEW in pika 0.10.0 :rtype: int @@ -1941,13 +2016,13 @@ "wait completed without GetOk and GetEmpty") return None, None, None - def basic_publish(self, exchange, routing_key, body, # pylint: disable=R0913 + def basic_publish(self, exchange, routing_key, body, properties=None, mandatory=False, immediate=False): """Publish to the channel with the given exchange, routing key and body. 
Returns a boolean value indicating the success of the operation. This is the legacy BlockingChannel method for publishing. See also - `BasicChannel.publish` that provides more information about failures. + `BlockingChannel.publish` that provides more information about failures. For more information on basic_publish and what the parameters do, see: @@ -1970,7 +2045,7 @@ :returns: True if delivery confirmation is not enabled (NEW in pika 0.10.0); otherwise returns False if the message could not be - deliveved (Basic.nack and/or Basic.Return) and True if the message + delivered (Basic.nack and/or Basic.Return) and True if the message was delivered (Basic.ack and no Basic.Return) """ try: @@ -1981,7 +2056,7 @@ else: return True - def publish(self, exchange, routing_key, body, # pylint: disable=R0913 + def publish(self, exchange, routing_key, body, properties=None, mandatory=False, immediate=False): """Publish to the channel with the given exchange, routing key, and body. Unlike the legacy `BlockingChannel.basic_publish`, this method @@ -2158,10 +2233,10 @@ # of publisher acknowledgments self._impl.add_on_return_callback(self._on_puback_message_returned) - def exchange_declare(self, exchange=None, # pylint: disable=R0913 + def exchange_declare(self, exchange=None, exchange_type='direct', passive=False, durable=False, auto_delete=False, internal=False, - arguments=None, **kwargs): + arguments=None): """This method creates an exchange if it does not already exist, and if the exchange exists, verifies that it is of the correct and expected class. 
@@ -2181,15 +2256,12 @@ :param bool auto_delete: Remove when no more queues are bound to it :param bool internal: Can only be published to by other exchanges :param dict arguments: Custom key/value pair arguments for the exchange - :param str type: via kwargs: the deprecated exchange type parameter :returns: Method frame from the Exchange.Declare-ok response :rtype: `pika.frame.Method` having `method` attribute of type `spec.Exchange.DeclareOk` """ - assert len(kwargs) <= 1, kwargs - with _CallbackResult( self._MethodFrameCallbackResultArgs) as declare_ok_result: self._impl.exchange_declare( @@ -2201,8 +2273,7 @@ auto_delete=auto_delete, internal=internal, nowait=False, - arguments=arguments, - type=kwargs["type"] if kwargs else None) + arguments=arguments) self._flush_output(declare_ok_result.is_ready) return declare_ok_result.value.method_frame @@ -2247,8 +2318,8 @@ `spec.Exchange.BindOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as bind_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + bind_ok_result: self._impl.exchange_bind( callback=bind_ok_result.set_value_once, destination=destination, @@ -2290,7 +2361,7 @@ self._flush_output(unbind_ok_result.is_ready) return unbind_ok_result.value.method_frame - def queue_declare(self, queue='', passive=False, durable=False, # pylint: disable=R0913 + def queue_declare(self, queue='', passive=False, durable=False, exclusive=False, auto_delete=False, arguments=None): """Declare queue, create if needed. 
This method creates or checks a @@ -2314,8 +2385,8 @@ `spec.Queue.DeclareOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as declare_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + declare_ok_result: self._impl.queue_declare( callback=declare_ok_result.set_value_once, queue=queue, @@ -2342,8 +2413,8 @@ `spec.Queue.DeleteOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as delete_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + delete_ok_result: self._impl.queue_delete(callback=delete_ok_result.set_value_once, queue=queue, if_unused=if_unused, @@ -2364,8 +2435,8 @@ `spec.Queue.PurgeOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as purge_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + purge_ok_result: self._impl.queue_purge(callback=purge_ok_result.set_value_once, queue=queue, nowait=False) @@ -2419,8 +2490,8 @@ `spec.Queue.UnbindOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as unbind_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + unbind_ok_result: self._impl.queue_unbind(callback=unbind_ok_result.set_value_once, queue=queue, exchange=exchange, @@ -2439,8 +2510,8 @@ `spec.Tx.SelectOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as select_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + select_ok_result: self._impl.tx_select(select_ok_result.set_value_once) self._flush_output(select_ok_result.is_ready) @@ -2454,8 +2525,8 @@ `spec.Tx.CommitOk` """ - with _CallbackResult( - self._MethodFrameCallbackResultArgs) as commit_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + commit_ok_result: self._impl.tx_commit(commit_ok_result.set_value_once) self._flush_output(commit_ok_result.is_ready) @@ -2469,8 +2540,8 @@ `spec.Tx.CommitOk` """ - with _CallbackResult( - 
self._MethodFrameCallbackResultArgs) as rollback_ok_result: + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + rollback_ok_result: self._impl.tx_rollback(rollback_ok_result.set_value_once) self._flush_output(rollback_ok_result.is_ready) diff -Nru python-pika-0.10.0/pika/adapters/__init__.py python-pika-0.11.0/pika/adapters/__init__.py --- python-pika-0.10.0/pika/adapters/__init__.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/__init__.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,9 +1,8 @@ -# ***** BEGIN LICENSE BLOCK ***** -# -# For copyright and licensing please refer to COPYING. -# -# ***** END LICENSE BLOCK ***** -"""Pika provides multiple adapters to connect to RabbitMQ: +""" +Connection Adapters +=================== + +Pika provides multiple adapters to connect to RabbitMQ: - adapters.select_connection.SelectConnection: A native event based connection adapter that implements select, kqueue, poll and epoll. @@ -23,6 +22,7 @@ from pika.adapters.select_connection import IOLoop # Dynamically handle 3rd party library dependencies for optional imports + try: from pika.adapters.tornado_connection import TornadoConnection except ImportError: @@ -39,3 +39,8 @@ from pika.adapters.libev_connection import LibevConnection except ImportError: LibevConnection = None + +try: + from pika.adapters.asyncio_connection import AsyncioConnection +except ImportError: + AsyncioConnection = None diff -Nru python-pika-0.10.0/pika/adapters/libev_connection.py python-pika-0.11.0/pika/adapters/libev_connection.py --- python-pika-0.10.0/pika/adapters/libev_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/libev_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -84,9 +84,10 @@ :param pika.connection.Parameters parameters: Connection parameters :param on_open_callback: The method to call when the connection is open :type on_open_callback: method - :param on_open_error_callback: Method to call if the 
connection cannot - be opened - :type on_open_error_callback: method + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the default IOLoop in libev :param on_signal_callback: Method to call if SIGINT or SIGTERM occur @@ -99,6 +100,7 @@ with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) self.ioloop = pyev.default_loop() + self.ioloop.update() self.async = None self._on_signal_callback = on_signal_callback @@ -127,7 +129,7 @@ if self._on_signal_callback and not global_sigterm_watcher: global_sigterm_watcher = \ self.ioloop.signal(signal.SIGTERM, - self._handle_sigterm) + self._handle_sigterm) if self._on_signal_callback and not global_sigint_watcher: global_sigint_watcher = self.ioloop.signal(signal.SIGINT, @@ -136,11 +138,14 @@ if not self._io_watcher: self._io_watcher = \ self.ioloop.io(self.socket.fileno(), - self._PIKA_TO_LIBEV_ARRAY[self.event_state], - self._handle_events) + self._PIKA_TO_LIBEV_ARRAY[self.event_state], + self._handle_events) + # NOTE: if someone knows why this async is needed here, please add + # a comment in the code that explains it. self.async = pyev.Async(self.ioloop, self._noop_callable) self.async.start() + if self._on_signal_callback: global_sigterm_watcher.start() if self._on_signal_callback: @@ -158,7 +163,8 @@ be wiped. 
""" - for timer in self._active_timers.keys(): + active_timers = list(self._active_timers.keys()) + for timer in active_timers: self.remove_timeout(timer) if global_sigint_watcher: global_sigint_watcher.stop() @@ -196,7 +202,7 @@ def _reset_io_watcher(self): """Reset the IO watcher; retry as necessary - + """ self._io_watcher.stop() @@ -208,8 +214,9 @@ self._PIKA_TO_LIBEV_ARRAY[self.event_state]) break - except: # sometimes the stop() doesn't complete in time - if retries > 5: raise + except Exception: # sometimes the stop() doesn't complete in time + if retries > 5: + raise self._io_watcher.stop() # so try it again retries += 1 @@ -235,12 +242,12 @@ (callback_method, callback_timeout, kwargs) = self._active_timers[timer] + self.remove_timeout(timer) + if callback_timeout: callback_method(timeout=timer, **kwargs) else: callback_method(**kwargs) - - self.remove_timeout(timer) else: LOGGER.warning('Timer callback_method not found') @@ -267,7 +274,7 @@ :rtype: timer instance handle. """ - LOGGER.debug('deadline: {0}'.format(deadline)) + LOGGER.debug('deadline: %s', deadline) timer = self._get_timer(deadline) self._active_timers[timer] = (callback_method, callback_timeout, callback_kwargs) @@ -282,9 +289,13 @@ """ LOGGER.debug('stop') - self._active_timers.pop(timer, None) - timer.stop() - self._stopped_timers.append(timer) + try: + self._active_timers.pop(timer) + except KeyError: + LOGGER.warning("Attempted to remove inactive timer %s", timer) + else: + timer.stop() + self._stopped_timers.append(timer) def _create_and_connect_to_socket(self, sock_addr_tuple): """Call super and then set the socket to nonblocking.""" diff -Nru python-pika-0.10.0/pika/adapters/select_connection.py python-pika-0.11.0/pika/adapters/select_connection.py --- python-pika-0.10.0/pika/adapters/select_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/select_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -2,6 +2,7 @@ platform pika is running on. 
""" +import abc import os import logging import socket @@ -27,21 +28,36 @@ ERROR = 0x0008 -if pika.compat.PY2: - _SELECT_ERROR = select.error -else: - # select.error was deprecated and replaced by OSError in python 3.3 - _SELECT_ERROR = OSError - - -def _get_select_errno(error): - if pika.compat.PY2: - assert isinstance(error, select.error), repr(error) - return error.args[0] +# Reason for this unconventional dict initialization is the fact that on some +# platforms select.error is an aliases for OSError. We don't want the lambda +# for select.error to win over one for OSError. +_SELECT_ERROR_CHECKERS = {} +if pika.compat.PY3: + #InterruptedError is undefined in PY2 + #pylint: disable=E0602 + _SELECT_ERROR_CHECKERS[InterruptedError] = lambda e: True +_SELECT_ERROR_CHECKERS[select.error] = lambda e: e.args[0] == errno.EINTR +_SELECT_ERROR_CHECKERS[IOError] = lambda e: e.errno == errno.EINTR +_SELECT_ERROR_CHECKERS[OSError] = lambda e: e.errno == errno.EINTR + + +# We can reduce the number of elements in the list by looking at super-sub +# class relationship because only the most generic ones needs to be caught. +# For now the optimization is left out. +# Following is better but still incomplete. +#_SELECT_ERRORS = tuple(filter(lambda e: not isinstance(e, OSError), +# _SELECT_ERROR_CHECKERS.keys()) +# + [OSError]) +_SELECT_ERRORS = tuple(_SELECT_ERROR_CHECKERS.keys()) + +def _is_resumable(exc): + ''' Check if caught exception represents EINTR error. 
+ :param exc: exception; must be one of classes in _SELECT_ERRORS ''' + checker = _SELECT_ERROR_CHECKERS.get(exc.__class__, None) + if checker is not None: + return checker(exc) else: - assert isinstance(error, OSError), repr(error) - return error.errno - + return False class SelectConnection(BaseConnection): """An asynchronous connection adapter that attempts to use the fastest @@ -49,7 +65,7 @@ """ - def __init__(self, + def __init__(self, # pylint: disable=R0913 parameters=None, on_open_callback=None, on_open_error_callback=None, @@ -60,10 +76,10 @@ :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Method to call on connection open - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method - :param method on_close_callback: Method to call on connection close + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the global IOLoop in Tornado :raises: RuntimeError @@ -96,7 +112,7 @@ class IOLoop(object): - """Singlton wrapper that decides which type of poller to use, creates an + """Singleton wrapper that decides which type of poller to use, creates an instance of it in start_poller and keeps the invoking application in a blocking state by calling the pollers start method. 
Poller should keep looping until IOLoop.instance().stop() is called or there is a socket @@ -109,10 +125,8 @@ def __init__(self): self._poller = self._get_poller() - def __getattr__(self, attr): - return getattr(self._poller, attr) - - def _get_poller(self): + @staticmethod + def _get_poller(): """Determine the best poller to use for this enviroment.""" poller = None @@ -139,33 +153,129 @@ return poller + def add_timeout(self, deadline, callback_method): + """[API] Add the callback_method to the IOLoop timer to fire after + deadline seconds. Returns a handle to the timeout. Do not confuse with + Tornado's timeout where you pass in the time you want to have your + callback called. Only pass in the seconds until it's to be called. -class SelectPoller(object): - """Default behavior is to use Select since it's the widest supported and has - all of the methods we need for child classes as well. One should only need - to override the update_handler and start methods for additional types. + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: str - """ - # Drop out of the poll loop every POLL_TIMEOUT secs as a worst case, this - # is only a backstop value. We will run timeouts when they are scheduled. 
- POLL_TIMEOUT = 5 - # if the poller uses MS specify 1000 - POLL_TIMEOUT_MULT = 1 + """ + return self._poller.add_timeout(deadline, callback_method) - def __init__(self): - """Create an instance of the SelectPoller + def remove_timeout(self, timeout_id): + """[API] Remove a timeout + + :param str timeout_id: The timeout id to remove + + """ + self._poller.remove_timeout(timeout_id) + + def add_handler(self, fileno, handler, events): + """[API] Add a new fileno to the set to be monitored + + :param int fileno: The file descriptor + :param method handler: What is called when an event happens + :param int events: The event mask using READ, WRITE, ERROR + + """ + self._poller.add_handler(fileno, handler, events) + + def update_handler(self, fileno, events): + """[API] Set the events to the current events + + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR + + """ + self._poller.update_handler(fileno, events) + + def remove_handler(self, fileno): + """[API] Remove a file descriptor from the set + + :param int fileno: The file descriptor + + """ + self._poller.remove_handler(fileno) + + def start(self): + """[API] Start the main poller loop. It will loop until requested to + exit. See `IOLoop.stop`. + + """ + self._poller.start() + + def stop(self): + """[API] Request exit from the ioloop. The loop is NOT guaranteed to + stop before this method returns. This is the only method that may be + called from another thread. 
+ + """ + self._poller.stop() + + def process_timeouts(self): + """[Extension] Process pending timeouts, invoking callbacks for those + whose time has come + + """ + self._poller.process_timeouts() + + def activate_poller(self): + """[Extension] Activate the poller """ + self._poller.activate_poller() + + def deactivate_poller(self): + """[Extension] Deactivate the poller + + """ + self._poller.deactivate_poller() + + def poll(self): + """[Extension] Wait for events of interest on registered file + descriptors until an event of interest occurs or next timer deadline or + `_PollerBase._MAX_POLL_TIMEOUT`, whichever is sooner, and dispatch the + corresponding event handlers. + + """ + self._poller.poll() + + +_AbstractBase = abc.ABCMeta('_AbstractBase', (object,), {}) + + +class _PollerBase(_AbstractBase): # pylint: disable=R0902 + """Base class for select-based IOLoop implementations""" + + # Drop out of the poll loop every _MAX_POLL_TIMEOUT secs as a worst case; + # this is only a backstop value; we will run timeouts when they are + # scheduled. + _MAX_POLL_TIMEOUT = 5 + + # if the poller uses MS override with 1000 + POLL_TIMEOUT_MULT = 1 + + + def __init__(self): # fd-to-handler function mappings self._fd_handlers = dict() # event-to-fdset mappings self._fd_events = {READ: set(), WRITE: set(), ERROR: set()} - self._stopping = False + self._processing_fd_event_map = {} + + # Reentrancy tracker of the `start` method + self._start_nesting_levels = 0 + self._timeouts = {} self._next_timeout = None - self._processing_fd_event_map = {} + + self._stopping = False # Mutex for controlling critical sections where ioloop-interrupt sockets # are created, used, and destroyed. Needed in case `stop()` is called @@ -176,42 +286,6 @@ self._r_interrupt = None self._w_interrupt = None - def get_interrupt_pair(self): - """ Use a socketpair to be able to interrupt the ioloop if called - from another thread. 
Socketpair() is not supported on some OS (Win) - so use a pair of simple UDP sockets instead. The sockets will be - closed and garbage collected by python when the ioloop itself is. - """ - try: - read_sock, write_sock = socket.socketpair() - - except AttributeError: - LOGGER.debug("Using custom socketpair for interrupt") - read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - read_sock.bind(('localhost', 0)) - write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - write_sock.connect(read_sock.getsockname()) - - read_sock.setblocking(0) - write_sock.setblocking(0) - return read_sock, write_sock - - def read_interrupt(self, interrupt_sock, - events, write_only): # pylint: disable=W0613 - """ Read the interrupt byte(s). We ignore the event mask and write_only - flag as we can ony get here if there's data to be read on our fd. - - :param int interrupt_sock: The file descriptor to read from - :param int events: (unused) The events generated for this fd - :param bool write_only: (unused) True if poll was called to trigger a - write - """ - try: - os.read(interrupt_sock, 512) - except OSError as err: - if err.errno != errno.EAGAIN: - raise - def add_timeout(self, deadline, callback_method): """Add the callback_method to the IOLoop timer to fire after deadline seconds. Returns a handle to the timeout. 
Do not confuse with @@ -225,12 +299,16 @@ """ timeout_at = time.time() + deadline value = {'deadline': timeout_at, 'callback': callback_method} + # TODO when timer resolution is low (e.g., windows), we get id collision + # when retrying failing connection with tiny (e.g., 0) retry interval timeout_id = hash(frozenset(value.items())) self._timeouts[timeout_id] = value if not self._next_timeout or timeout_at < self._next_timeout: self._next_timeout = timeout_at + LOGGER.debug('add_timeout: added timeout %s; deadline=%s at %s', + timeout_id, deadline, timeout_at) return timeout_id def remove_timeout(self, timeout_id): @@ -241,16 +319,20 @@ """ try: timeout = self._timeouts.pop(timeout_id) + except KeyError: + LOGGER.warning('remove_timeout: %s not found', timeout_id) + else: if timeout['deadline'] == self._next_timeout: self._next_timeout = None - except KeyError: - pass - def get_next_deadline(self): + LOGGER.debug('remove_timeout: removed %s', timeout_id) + + def _get_next_deadline(self): """Get the interval to the next timeout event, or a default interval + """ if self._next_timeout: - timeout = max((self._next_timeout - time.time(), 0)) + timeout = max(self._next_timeout - time.time(), 0) elif self._timeouts: deadlines = [t['deadline'] for t in self._timeouts.values()] @@ -258,15 +340,16 @@ timeout = max((self._next_timeout - time.time(), 0)) else: - timeout = SelectPoller.POLL_TIMEOUT - - timeout = min((timeout, SelectPoller.POLL_TIMEOUT)) - return timeout * SelectPoller.POLL_TIMEOUT_MULT + timeout = self._MAX_POLL_TIMEOUT + timeout = min(timeout, self._MAX_POLL_TIMEOUT) + return timeout * self.POLL_TIMEOUT_MULT def process_timeouts(self): - """Process the self._timeouts event stack""" + """Process pending timeouts, invoking callbacks for those whose time has + come + """ now = time.time() # Run the timeouts in order of deadlines. 
Although this shouldn't # be strictly necessary it preserves old behaviour when timeouts @@ -287,32 +370,35 @@ if self._timeouts.pop(k, None) is not None: self._next_timeout = None - def add_handler(self, fileno, handler, events): """Add a new fileno to the set to be monitored - :param int fileno: The file descriptor - :param method handler: What is called when an event happens - :param int events: The event mask + :param int fileno: The file descriptor + :param method handler: What is called when an event happens + :param int events: The event mask using READ, WRITE, ERROR - """ + """ self._fd_handlers[fileno] = handler - self.update_handler(fileno, events) + self._set_handler_events(fileno, events) + + # Inform the derived class + self._register_fd(fileno, events) def update_handler(self, fileno, events): """Set the events to the current events :param int fileno: The file descriptor - :param int events: The event mask + :param int events: The event mask using READ, WRITE, ERROR """ + # Record the change + events_cleared, events_set = self._set_handler_events(fileno, events) - for ev in (READ, WRITE, ERROR): - if events & ev: - self._fd_events[ev].add(fileno) - else: - self._fd_events[ev].discard(fileno) - + # Inform the derived class + self._modify_fd_events(fileno, + events=events, + events_to_clear=events_cleared, + events_to_set=events_set) def remove_handler(self, fileno): """Remove a file descriptor from the set @@ -325,37 +411,96 @@ except KeyError: pass - self.update_handler(fileno, 0) + events_cleared, _ = self._set_handler_events(fileno, 0) del self._fd_handlers[fileno] + # Inform the derived class + self._unregister_fd(fileno, events_to_clear=events_cleared) + + def _set_handler_events(self, fileno, events): + """Set the handler's events to the given events; internal to + `_PollerBase`. 
+ + :param int fileno: The file descriptor + :param int events: The event mask (READ, WRITE, ERROR) + + :returns: a 2-tuple (events_cleared, events_set) + """ + events_cleared = 0 + events_set = 0 + + for evt in (READ, WRITE, ERROR): + if events & evt: + if fileno not in self._fd_events[evt]: + self._fd_events[evt].add(fileno) + events_set |= evt + else: + if fileno in self._fd_events[evt]: + self._fd_events[evt].discard(fileno) + events_cleared |= evt + + return events_cleared, events_set + + def activate_poller(self): + """Activate the poller + + """ + # Activate the underlying poller and register current events + self._init_poller() + fd_to_events = defaultdict(int) + for event, file_descriptors in self._fd_events.items(): + for fileno in file_descriptors: + fd_to_events[fileno] |= event + + for fileno, events in fd_to_events.items(): + self._register_fd(fileno, events) + + def deactivate_poller(self): + """Deactivate the poller + + """ + self._uninit_poller() + def start(self): - """Start the main poller loop. It will loop here until self._stopping""" + """Start the main poller loop. It will loop until requested to exit - LOGGER.debug('Starting IOLoop') - self._stopping = False + """ + self._start_nesting_levels += 1 - with self._mutex: - # Watch out for reentry - if self._r_interrupt is None: - # Create ioloop-interrupt socket pair and register read handler. - # NOTE: we defer their creation because some users (e.g., - # BlockingConnection adapter) don't use the event loop and these - # sockets would get reported as leaks - self._r_interrupt, self._w_interrupt = self.get_interrupt_pair() + if self._start_nesting_levels == 1: + LOGGER.debug('Entering IOLoop') + self._stopping = False + + # Activate the underlying poller and register current events + self.activate_poller() + + # Create ioloop-interrupt socket pair and register read handler. 
+ # NOTE: we defer their creation because some users (e.g., + # BlockingConnection adapter) don't use the event loop and these + # sockets would get reported as leaks + with self._mutex: + assert self._r_interrupt is None + self._r_interrupt, self._w_interrupt = self._get_interrupt_pair() self.add_handler(self._r_interrupt.fileno(), - self.read_interrupt, + self._read_interrupt, READ) - interrupt_sockets_created = True - else: - interrupt_sockets_created = False + + else: + LOGGER.debug('Reentering IOLoop at nesting level=%s', + self._start_nesting_levels) + try: # Run event loop while not self._stopping: self.poll() self.process_timeouts() + finally: - # Unregister and close ioloop-interrupt socket pair - if interrupt_sockets_created: + self._start_nesting_levels -= 1 + + if self._start_nesting_levels == 0: + LOGGER.debug('Cleaning up IOLoop') + # Unregister and close ioloop-interrupt socket pair with self._mutex: self.remove_handler(self._r_interrupt.fileno()) self._r_interrupt.close() @@ -363,9 +508,18 @@ self._w_interrupt.close() self._w_interrupt = None + # Deactivate the underlying poller + self.deactivate_poller() + else: + LOGGER.debug('Leaving IOLoop with %s nesting levels remaining', + self._start_nesting_levels) + def stop(self): - """Request exit from the ioloop.""" + """Request exit from the ioloop. The loop is NOT guaranteed to stop + before this method returns. This is the only method that may be called + from another thread. + """ LOGGER.debug('Stopping IOLoop') self._stopping = True @@ -374,9 +528,9 @@ return try: - # Send byte to interrupt the poll loop, use write() for - # consitency. 
- os.write(self._w_interrupt.fileno(), b'X') + # Send byte to interrupt the poll loop, use send() instead of + # os.write for Windows compatibility + self._w_interrupt.send(b'X') except OSError as err: if err.errno != errno.EWOULDBLOCK: raise @@ -386,63 +540,230 @@ LOGGER.warning("Failed to send ioloop interrupt: %s", err) raise - def poll(self, write_only=False): + @abc.abstractmethod + def poll(self): """Wait for events on interested filedescriptors. + """ + raise NotImplementedError + + @abc.abstractmethod + def _init_poller(self): + """Notify the implementation to allocate the poller resource""" + raise NotImplementedError + + @abc.abstractmethod + def _uninit_poller(self): + """Notify the implementation to release the poller resource""" + raise NotImplementedError + + @abc.abstractmethod + def _register_fd(self, fileno, events): + """The base class invokes this method to notify the implementation to + register the file descriptor with the polling object. The request must + be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events: The event mask (READ, WRITE, ERROR) + """ + raise NotImplementedError + + @abc.abstractmethod + def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set): + """The base class invoikes this method to notify the implementation to + modify an already registered file descriptor. The request must be + ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events: absolute events (READ, WRITE, ERROR) + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + :param int events_to_set: The events to set (READ, WRITE, ERROR) + """ + raise NotImplementedError + + @abc.abstractmethod + def _unregister_fd(self, fileno, events_to_clear): + """The base class invokes this method to notify the implementation to + unregister the file descriptor being tracked by the polling object. 
The + request must be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + """ + raise NotImplementedError + + def _dispatch_fd_events(self, fd_event_map): + """ Helper to dispatch callbacks for file descriptors that received + events. + + Before doing so we re-calculate the event mask based on what is + currently set in case it has been changed under our feet by a + previous callback. We also take a store a refernce to the + fd_event_map so that we can detect removal of an + fileno during processing of another callback and not generate + spurious callbacks on it. + + :param dict fd_event_map: Map of fds to events received on them. + """ + # Reset the prior map; if the call is nested, this will suppress the + # remaining dispatch in the earlier call. + self._processing_fd_event_map.clear() + + self._processing_fd_event_map = fd_event_map + + for fileno in dictkeys(fd_event_map): + if fileno not in fd_event_map: + # the fileno has been removed from the map under our feet. + continue + + events = fd_event_map[fileno] + for evt in [READ, WRITE, ERROR]: + if fileno not in self._fd_events[evt]: + events &= ~evt + + if events: + handler = self._fd_handlers[fileno] + handler(fileno, events) + + @staticmethod + def _get_interrupt_pair(): + """ Use a socketpair to be able to interrupt the ioloop if called + from another thread. Socketpair() is not supported on some OS (Win) + so use a pair of simple UDP sockets instead. The sockets will be + closed and garbage collected by python when the ioloop itself is. 
+ """ + try: + read_sock, write_sock = socket.socketpair() + + except AttributeError: + LOGGER.debug("Using custom socketpair for interrupt") + read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + read_sock.bind(('localhost', 0)) + write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + write_sock.connect(read_sock.getsockname()) + + read_sock.setblocking(0) + write_sock.setblocking(0) + return read_sock, write_sock + + def _read_interrupt(self, interrupt_fd, events): # pylint: disable=W0613 + """ Read the interrupt byte(s). We ignore the event mask as we can ony + get here if there's data to be read on our fd. + + :param int interrupt_fd: The file descriptor to read from + :param int events: (unused) The events generated for this fd + """ + try: + # NOTE Use recv instead of os.read for windows compatibility + # TODO _r_interrupt is a DGRAM sock, so attempted reading of 512 + # bytes will not have the desired effect in case stop was called + # multiple times + self._r_interrupt.recv(512) + except OSError as err: + if err.errno != errno.EAGAIN: + raise + + +class SelectPoller(_PollerBase): + """Default behavior is to use Select since it's the widest supported and has + all of the methods we need for child classes as well. One should only need + to override the update_handler and start methods for additional types. + + """ + # if the poller uses MS specify 1000 + POLL_TIMEOUT_MULT = 1 + + def __init__(self): + """Create an instance of the SelectPoller + + """ + super(SelectPoller, self).__init__() + + + def poll(self): + """Wait for events of interest on registered file descriptors until an + event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT, + whichever is sooner, and dispatch the corresponding event handlers. - :param bool write_only: Passed through to the hadnlers to indicate - that they should only process write events. 
""" while True: try: - read, write, error = select.select(self._fd_events[READ], - self._fd_events[WRITE], - self._fd_events[ERROR], - self.get_next_deadline()) + if (self._fd_events[READ] or self._fd_events[WRITE] or + self._fd_events[ERROR]): + read, write, error = select.select( + self._fd_events[READ], + self._fd_events[WRITE], + self._fd_events[ERROR], + self._get_next_deadline()) + else: + # NOTE When called without any FDs, select fails on + # Windows with error 10022, 'An invalid argument was + # supplied'. + time.sleep(self._get_next_deadline()) + read, write, error = [], [], [] + break - except _SELECT_ERROR as error: - if _get_select_errno(error) == errno.EINTR: + except _SELECT_ERRORS as error: + if _is_resumable(error): continue else: raise - # Build an event bit mask for each fileno we've recieved an event for + # Build an event bit mask for each fileno we've received an event for fd_event_map = defaultdict(int) - for fd_set, ev in zip((read, write, error), (READ, WRITE, ERROR)): + for fd_set, evt in zip((read, write, error), (READ, WRITE, ERROR)): for fileno in fd_set: - fd_event_map[fileno] |= ev + fd_event_map[fileno] |= evt - self._process_fd_events(fd_event_map, write_only) + self._dispatch_fd_events(fd_event_map) - def _process_fd_events(self, fd_event_map, write_only): - """ Processes the callbacks for each fileno we've recieved events. - Before doing so we re-calculate the event mask based on what is - currently set in case it has been changed under our feet by a - previous callback. We also take a store a refernce to the - fd_event_map in the class so that we can detect removal of an - fileno during processing of another callback and not generate - spurious callbacks on it. 
+ def _init_poller(self): + """Notify the implementation to allocate the poller resource""" + # It's a no op in SelectPoller + pass + + def _uninit_poller(self): + """Notify the implementation to release the poller resource""" + # It's a no op in SelectPoller + pass + + def _register_fd(self, fileno, events): + """The base class invokes this method to notify the implementation to + register the file descriptor with the polling object. The request must + be ignored if the poller is not activated. - :param dict fd_event_map: Map of fds to events recieved on them. + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR """ + # It's a no op in SelectPoller + pass - self._processing_fd_event_map = fd_event_map + def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set): + """The base class invoikes this method to notify the implementation to + modify an already registered file descriptor. The request must be + ignored if the poller is not activated. - for fileno in dictkeys(fd_event_map): - if fileno not in fd_event_map: - # the fileno has been removed from the map under our feet. - continue + :param int fileno: The file descriptor + :param int events: absolute events (READ, WRITE, ERROR) + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + :param int events_to_set: The events to set (READ, WRITE, ERROR) + """ + # It's a no op in SelectPoller + pass + + def _unregister_fd(self, fileno, events_to_clear): + """The base class invokes this method to notify the implementation to + unregister the file descriptor being tracked by the polling object. The + request must be ignored if the poller is not activated. 
- events = fd_event_map[fileno] - for ev in [READ, WRITE, ERROR]: - if fileno not in self._fd_events[ev]: - events &= ~ev + :param int fileno: The file descriptor + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + """ + # It's a no op in SelectPoller + pass - if events: - handler = self._fd_handlers[fileno] - handler(fileno, events, write_only=write_only) -class KQueuePoller(SelectPoller): +class KQueuePoller(_PollerBase): """KQueuePoller works on BSD based systems and is faster than select""" def __init__(self): @@ -453,43 +774,12 @@ :param int events: The events to look for """ - self._kqueue = select.kqueue() super(KQueuePoller, self).__init__() - def update_handler(self, fileno, events): - """Set the events to the current events + self._kqueue = None - :param int fileno: The file descriptor - :param int events: The event mask - - """ - - kevents = list() - if not events & READ: - if fileno in self._fd_events[READ]: - kevents.append(select.kevent(fileno, - filter=select.KQ_FILTER_READ, - flags=select.KQ_EV_DELETE)) - else: - if fileno not in self._fd_events[READ]: - kevents.append(select.kevent(fileno, - filter=select.KQ_FILTER_READ, - flags=select.KQ_EV_ADD)) - if not events & WRITE: - if fileno in self._fd_events[WRITE]: - kevents.append(select.kevent(fileno, - filter=select.KQ_FILTER_WRITE, - flags=select.KQ_EV_DELETE)) - else: - if fileno not in self._fd_events[WRITE]: - kevents.append(select.kevent(fileno, - filter=select.KQ_FILTER_WRITE, - flags=select.KQ_EV_ADD)) - for event in kevents: - self._kqueue.control([event], 0) - super(KQueuePoller, self).update_handler(fileno, events) - - def _map_event(self, kevent): + @staticmethod + def _map_event(kevent): """return the event type associated with a kevent object :param kevent kevent: a kevent object as returned by kqueue.control() @@ -502,33 +792,102 @@ elif kevent.flags & select.KQ_EV_ERROR: return ERROR - def poll(self, write_only=False): - """Check to see if the events that are cared 
about have fired. - - :param bool write_only: Don't look at self.events, just look to see if - the adapter can write. + def poll(self): + """Wait for events of interest on registered file descriptors until an + event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT, + whichever is sooner, and dispatch the corresponding event handlers. """ while True: try: kevents = self._kqueue.control(None, 1000, - self.get_next_deadline()) + self._get_next_deadline()) break - except _SELECT_ERROR as error: - if _get_select_errno(error) == errno.EINTR: + except _SELECT_ERRORS as error: + if _is_resumable(error): continue else: raise fd_event_map = defaultdict(int) for event in kevents: - fileno = event.ident - fd_event_map[fileno] |= self._map_event(event) + fd_event_map[event.ident] |= self._map_event(event) - self._process_fd_events(fd_event_map, write_only) + self._dispatch_fd_events(fd_event_map) + def _init_poller(self): + """Notify the implementation to allocate the poller resource""" + assert self._kqueue is None -class PollPoller(SelectPoller): + self._kqueue = select.kqueue() + + def _uninit_poller(self): + """Notify the implementation to release the poller resource""" + self._kqueue.close() + self._kqueue = None + + def _register_fd(self, fileno, events): + """The base class invokes this method to notify the implementation to + register the file descriptor with the polling object. The request must + be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR + """ + self._modify_fd_events(fileno, + events=events, + events_to_clear=0, + events_to_set=events) + + def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set): + """The base class invoikes this method to notify the implementation to + modify an already registered file descriptor. The request must be + ignored if the poller is not activated. 
+ + :param int fileno: The file descriptor + :param int events: absolute events (READ, WRITE, ERROR) + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + :param int events_to_set: The events to set (READ, WRITE, ERROR) + """ + if self._kqueue is None: + return + + kevents = list() + + if events_to_clear & READ: + kevents.append(select.kevent(fileno, + filter=select.KQ_FILTER_READ, + flags=select.KQ_EV_DELETE)) + if events_to_set & READ: + kevents.append(select.kevent(fileno, + filter=select.KQ_FILTER_READ, + flags=select.KQ_EV_ADD)) + if events_to_clear & WRITE: + kevents.append(select.kevent(fileno, + filter=select.KQ_FILTER_WRITE, + flags=select.KQ_EV_DELETE)) + if events_to_set & WRITE: + kevents.append(select.kevent(fileno, + filter=select.KQ_FILTER_WRITE, + flags=select.KQ_EV_ADD)) + + self._kqueue.control(kevents, 0) + + def _unregister_fd(self, fileno, events_to_clear): + """The base class invokes this method to notify the implementation to + unregister the file descriptor being tracked by the polling object. The + request must be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + """ + self._modify_fd_events(fileno, + events=0, + events_to_clear=events_to_clear, + events_to_set=0) + + +class PollPoller(_PollerBase): """Poll works on Linux and can have better performance than EPoll in certain scenarios. Both are faster than select. 
@@ -543,54 +902,28 @@ :param int events: The events to look for """ - self._poll = self.create_poller() + self._poll = None super(PollPoller, self).__init__() - def create_poller(self): - return select.poll() # pylint: disable=E1101 - - def add_handler(self, fileno, handler, events): - """Add a file descriptor to the poll set - - :param int fileno: The file descriptor to check events for - :param method handler: What is called when an event happens - :param int events: The events to look for - - """ - self._poll.register(fileno, events) - super(PollPoller, self).add_handler(fileno, handler, events) - - def update_handler(self, fileno, events): - """Set the events to the current events - - :param int fileno: The file descriptor - :param int events: The event mask - + @staticmethod + def _create_poller(): """ - super(PollPoller, self).update_handler(fileno, events) - self._poll.modify(fileno, events) - - def remove_handler(self, fileno): - """Remove a fileno to the set - - :param int fileno: The file descriptor - + :rtype: `select.poll` """ - super(PollPoller, self).remove_handler(fileno) - self._poll.unregister(fileno) - - def poll(self, write_only=False): - """Poll until the next timeout waiting for an event + return select.poll() # pylint: disable=E1101 - :param bool write_only: Only process write events + def poll(self): + """Wait for events of interest on registered file descriptors until an + event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT, + whichever is sooner, and dispatch the corresponding event handlers. 
""" while True: try: - events = self._poll.poll(self.get_next_deadline()) + events = self._poll.poll(self._get_next_deadline()) break - except _SELECT_ERROR as error: - if _get_select_errno(error) == errno.EINTR: + except _SELECT_ERRORS as error: + if _is_resumable(error): continue else: raise @@ -599,7 +932,55 @@ for fileno, event in events: fd_event_map[fileno] |= event - self._process_fd_events(fd_event_map, write_only) + self._dispatch_fd_events(fd_event_map) + + def _init_poller(self): + """Notify the implementation to allocate the poller resource""" + assert self._poll is None + + self._poll = self._create_poller() + + def _uninit_poller(self): + """Notify the implementation to release the poller resource""" + if hasattr(self._poll, "close"): + self._poll.close() + + self._poll = None + + def _register_fd(self, fileno, events): + """The base class invokes this method to notify the implementation to + register the file descriptor with the polling object. The request must + be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR + """ + if self._poll is not None: + self._poll.register(fileno, events) + + def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set): + """The base class invoikes this method to notify the implementation to + modify an already registered file descriptor. The request must be + ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events: absolute events (READ, WRITE, ERROR) + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + :param int events_to_set: The events to set (READ, WRITE, ERROR) + """ + if self._poll is not None: + self._poll.modify(fileno, events) + + def _unregister_fd(self, fileno, events_to_clear): + """The base class invokes this method to notify the implementation to + unregister the file descriptor being tracked by the polling object. 
The + request must be ignored if the poller is not activated. + + :param int fileno: The file descriptor + :param int events_to_clear: The events to clear (READ, WRITE, ERROR) + """ + if self._poll is not None: + self._poll.unregister(fileno) class EPollPoller(PollPoller): @@ -609,5 +990,9 @@ """ POLL_TIMEOUT_MULT = 1 - def create_poller(self): + @staticmethod + def _create_poller(): + """ + :rtype: `select.poll` + """ return select.epoll() # pylint: disable=E1101 diff -Nru python-pika-0.10.0/pika/adapters/tornado_connection.py python-pika-0.11.0/pika/adapters/tornado_connection.py --- python-pika-0.10.0/pika/adapters/tornado_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/tornado_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -39,9 +39,10 @@ :param pika.connection.Parameters parameters: Connection parameters :param on_open_callback: The method to call when the connection is open :type on_open_callback: method - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the global IOLoop in Tornado @@ -55,7 +56,7 @@ def _adapter_connect(self): """Connect to the remote socket, adding the socket to the IOLoop if - connected. + connected. 
:rtype: bool diff -Nru python-pika-0.10.0/pika/adapters/twisted_connection.py python-pika-0.11.0/pika/adapters/twisted_connection.py --- python-pika-0.10.0/pika/adapters/twisted_connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/adapters/twisted_connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -16,6 +16,7 @@ from twisted.internet import defer, error, reactor from twisted.python import log +from pika import connection from pika import exceptions from pika.adapters import base_connection @@ -105,6 +106,9 @@ try: consumer_tag = self.__channel.basic_consume(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. It should be at + # least `except Exception` and preferably more specific. except: return defer.fail() @@ -163,6 +167,9 @@ try: method(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. It should be + # at least `except Exception` and preferably more specific. except: return defer.fail() return d @@ -300,13 +307,6 @@ self.ioloop.remove_handler(None) self._cleanup_socket() - def _handle_disconnect(self): - """Do not stop the reactor, this would cause the entire process to exit, - just fire the disconnect callbacks - - """ - self._on_connection_closed(None, True) - def _on_connected(self): """Call superclass and then update the event state to flush the outgoing frame out. 
Commit 50d842526d9f12d32ad9f3c4910ef60b8c301f59 removed a @@ -339,7 +339,8 @@ if not reason.check(error.ConnectionDone): log.err(reason) - self._handle_disconnect() + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + str(reason)) def doRead(self): self._handle_read() @@ -366,13 +367,13 @@ """ - def __init__(self, parameters): + def __init__(self, parameters=None, on_close_callback=None): self.ready = defer.Deferred() super(TwistedProtocolConnection, self).__init__( parameters=parameters, on_open_callback=self.connectionReady, on_open_error_callback=self.connectionFailed, - on_close_callback=None, + on_close_callback=on_close_callback, ioloop=IOLoopReactorAdapter(self, reactor), stop_ioloop_on_close=False) diff -Nru python-pika-0.10.0/pika/channel.py python-pika-0.11.0/pika/channel.py --- python-pika-0.10.0/pika/channel.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/channel.py 2017-08-29 16:54:39.000000000 +0000 @@ -2,20 +2,21 @@ implementing the methods and behaviors for an AMQP Channel. """ + import collections import logging -import warnings import uuid import pika.frame as frame import pika.exceptions as exceptions import pika.spec as spec from pika.utils import is_callable -from pika.compat import unicode_type, dictkeys, as_bytes +from pika.compat import unicode_type, dictkeys, is_integer LOGGER = logging.getLogger(__name__) -MAX_CHANNELS = 32768 + +MAX_CHANNELS = 65535 # per AMQP 0.9.1 spec. class Channel(object): @@ -26,19 +27,30 @@ method. 
""" + + # Disable pyling messages concerning "method could be a function" + # pylint: disable=R0201 + CLOSED = 0 OPENING = 1 OPEN = 2 - CLOSING = 3 + CLOSING = 3 # client-initiated close in progress + + _STATE_NAMES = { + CLOSED: 'CLOSED', + OPENING: 'OPENING', + OPEN: 'OPEN', + CLOSING: 'CLOSING' + } _ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup' - def __init__(self, connection, channel_number, on_open_callback=None): + def __init__(self, connection, channel_number, on_open_callback): """Create a new instance of the Channel :param pika.connection.Connection connection: The connection :param int channel_number: The channel number for this instance - :param method on_open_callback: The method to call on channel open + :param callable on_open_callback: The callback to call on channel open """ if not isinstance(channel_number, int): @@ -47,8 +59,10 @@ self.callbacks = connection.callbacks self.connection = connection - # The frame-handler changes depending on the type of frame processed - self.frame_dispatcher = ContentFrameDispatcher() + # Initially, flow is assumed to be active + self.flow_active = True + + self._content_assembler = ContentFrameAssembler() self._blocked = collections.deque(list()) self._blocking = None @@ -59,9 +73,16 @@ self._on_flowok_callback = None self._on_getok_callback = None self._on_openok_callback = on_open_callback - self._pending = dict() self._state = self.CLOSED + # We save the closing reason code and text to be passed to + # on-channel-close callback at closing of the channel. Channel.close + # stores the given reply_code/reply_text if the channel was in OPEN or + # OPENING states. An incoming Channel.Close AMQP method from broker will + # override this value. And a sudden loss of connection has the highest + # prececence to override it. 
+ self._closing_code_and_text = (0, '') + # opaque cookie value set by wrapper layer (e.g., BlockingConnection) # via _set_cookie self._cookie = None @@ -74,12 +95,18 @@ """ return self.channel_number + def __repr__(self): + return '<%s number=%s %s conn=%r>' % (self.__class__.__name__, + self.channel_number, + self._STATE_NAMES[self._state], + self.connection) + def add_callback(self, callback, replies, one_shot=True): """Pass in a callback handler and a list replies from the RabbitMQ broker which you'd like the callback notified of. Callbacks should allow for the frame parameter to be passed in. - :param method callback: The method to call + :param callable callback: The callback to call :param list replies: The replies to get a callback for :param bool one_shot: Only handle the first type callback @@ -92,7 +119,8 @@ is sent by the server. The callback function should receive a frame parameter. - :param method callback: The method to call on callback + :param callable callback: The callback to call on Basic.Cancel from + broker """ self.callbacks.add(self.channel_number, spec.Basic.Cancel, callback, @@ -101,10 +129,23 @@ def add_on_close_callback(self, callback): """Pass a callback function that will be called when the channel is closed. The callback function will receive the channel, the - reply_code (int) and the reply_text (int) sent by the server describing - why the channel was closed. + reply_code (int) and the reply_text (int) describing why the channel was + closed. + + If the channel is closed by broker via Channel.Close, the callback will + receive the reply_code/reply_text provided by the broker. + + If channel closing is initiated by user (either directly of indirectly + by closing a connection containing the channel) and closing + concludes gracefully without Channel.Close from the broker and without + loss of connection, the callback will receive 0 as reply_code and empty + string as reply_text. 
- :param method callback: The method to call on callback + If channel was closed due to loss of connection, the callback will + receive reply_code and reply_text representing the loss of connection. + + :param callable callback: The callback, having the signature: + callback(Channel, int reply_code, str reply_text) """ self.callbacks.add(self.channel_number, '_on_channel_close', callback, @@ -115,7 +156,7 @@ called by the remote server. Note that newer versions of RabbitMQ will not issue this but instead use TCP backpressure - :param method callback: The method to call on callback + :param callable callback: The callback function """ self._has_on_flow_callback = True @@ -126,9 +167,9 @@ """Pass a callback function that will be called when basic_publish as sent a message that has been rejected and returned by the server. - :param method callback: The method to call on callback with the - signature callback(channel, method, properties, - body), where + :param callable callback: The function to call, having the signature + callback(channel, method, properties, body) + where channel: pika.Channel method: pika.spec.Basic.Return properties: pika.spec.BasicProperties @@ -145,7 +186,7 @@ confirm mode. The acknowledgement can be for a single message or a set of messages up to and including a specific message. - :param int delivery-tag: The server-assigned delivery tag + :param integer delivery_tag: int/long The server-assigned delivery tag :param bool multiple: If set to True, the delivery tag is treated as "up to and including", so that multiple messages can be acknowledged with a single method. If set @@ -169,23 +210,59 @@ basic.cancel from the client). This allows clients to be notified of the loss of consumers due to events such as queue deletion. - :param method callback: Method to call for a Basic.CancelOk response + :param callable callback: Callback to call for a Basic.CancelOk + response; MUST be None when nowait=True. MUST be callable when + nowait=False. 
:param str consumer_tag: Identifier for the consumer :param bool nowait: Do not expect a Basic.CancelOk response - :raises: ValueError + + :raises ValueError: """ self._validate_channel_and_callback(callback) - if consumer_tag not in self.consumer_tags: + + if nowait: + if callback is not None: + raise ValueError( + 'Completion callback must be None when nowait=True') + else: + if callback is None: + raise ValueError( + 'Must have completion callback with nowait=False') + + if consumer_tag in self._cancelled: + # We check for cancelled first, because basic_cancel removes + # consumers closed with nowait from self._consumers + LOGGER.warning('basic_cancel - consumer is already cancelling: %s', + consumer_tag) return - if callback: - if nowait is True: - raise ValueError('Can not pass a callback if nowait is True') + + if consumer_tag not in self._consumers: + # Could be cancelled by user or broker earlier + LOGGER.warning('basic_cancel - consumer not found: %s', + consumer_tag) + return + + LOGGER.debug('Cancelling consumer: %s (nowait=%s)', + consumer_tag, nowait) + + if nowait: + # This is our last opportunity while the channel is open to remove + # this consumer callback and help gc; unfortunately, this consumer's + # self._cancelled and self._consumers_with_noack (if any) entries + # will persist until the channel is closed. 
+ del self._consumers[consumer_tag] + + if callback is not None: + if nowait: + raise ValueError('Cannot pass a callback if nowait is True') self.callbacks.add(self.channel_number, spec.Basic.CancelOk, callback) + self._cancelled.add(consumer_tag) - self._rpc(spec.Basic.Cancel(consumer_tag=consumer_tag, - nowait=nowait), self._on_cancelok, + + self._rpc(spec.Basic.Cancel(consumer_tag=consumer_tag, nowait=nowait), + self._on_cancelok if not nowait else None, [(spec.Basic.CancelOk, {'consumer_tag': consumer_tag})] if nowait is False else []) @@ -195,15 +272,18 @@ exclusive=False, consumer_tag=None, arguments=None): - """Sends the AMQP command Basic.Consume to the broker and binds messages + """Sends the AMQP 0-9-1 command Basic.Consume to the broker and binds messages for the consumer_tag to the consumer callback. If you do not pass in a consumer_tag, one will be automatically generated for you. Returns the consumer tag. For more information on basic_consume, see: + Tutorial 2 at http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/confirms.html http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume - :param method consumer_callback: The method to callback when consuming + + :param callable consumer_callback: The function to call when consuming with the signature consumer_callback(channel, method, properties, body), where channel: pika.Channel @@ -213,11 +293,12 @@ :param queue: The queue to consume from :type queue: str or unicode - :param bool no_ack: Tell the broker to not expect a response + :param bool no_ack: if set to True, automatic acknowledgement mode will be used + (see http://www.rabbitmq.com/confirms.html) :param bool exclusive: Don't allow other consumers on the queue :param consumer_tag: Specify your own consumer tag :type consumer_tag: str or unicode - :param dict arguments: Custom key/value pair arguments for the consume + :param dict arguments: Custom key/value pair arguments for the consumer :rtype: str """ @@ -234,7 +315,6 @@ 
self._consumers_with_noack.add(consumer_tag) self._consumers[consumer_tag] = consumer_callback - self._pending[consumer_tag] = list() self._rpc(spec.Basic.Consume(queue=queue, consumer_tag=consumer_tag, no_ack=no_ack, @@ -260,12 +340,13 @@ """Get a single message from the AMQP broker. If you want to be notified of Basic.GetEmpty, use the Channel.add_callback method adding your Basic.GetEmpty callback which should expect only one - parameter, frame. For more information on basic_get and its - parameters, see: + parameter, frame. Due to implementation details, this cannot be called + a second time until the callback is executed. For more information on + basic_get and its parameters, see: http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get - :param method callback: The method to callback with a message that has + :param callable callback: The callback to call with a message that has the signature callback(channel, method, properties, body), where: channel: pika.Channel method: pika.spec.Basic.GetOk @@ -277,7 +358,13 @@ """ self._validate_channel_and_callback(callback) + # TODO Is basic_get meaningful when callback is None? + if self._on_getok_callback is not None: + raise exceptions.DuplicateGetOkCallback() self._on_getok_callback = callback + # TODO Strangely, not using _rpc for the synchronous Basic.Get. Would + # need to extend _rpc to handle Basic.GetOk method, header, and body + # frames (or similar) self._send_method(spec.Basic.Get(queue=queue, no_ack=no_ack)) def basic_nack(self, delivery_tag=None, multiple=False, requeue=True): @@ -285,7 +372,7 @@ It can be used to interrupt and cancel large incoming messages, or return untreatable messages to their original queue. 
- :param int delivery-tag: The server-assigned delivery tag + :param integer delivery-tag: int/long The server-assigned delivery tag :param bool multiple: If set to True, the delivery tag is treated as "up to and including", so that multiple messages can be acknowledged with a single method. If set @@ -349,7 +436,7 @@ following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. - :param method callback: The method to callback for Basic.QosOk response + :param callable callback: The callback to call for Basic.QosOk response :param int prefetch_size: This field specifies the prefetch window size. The server will send a message in advance if it is equal to or smaller in size @@ -372,15 +459,15 @@ """ self._validate_channel_and_callback(callback) return self._rpc(spec.Basic.Qos(prefetch_size, prefetch_count, - all_channels), callback, - [spec.Basic.QosOk]) + all_channels), + callback, [spec.Basic.QosOk]) def basic_reject(self, delivery_tag, requeue=True): """Reject an incoming message. This method allows a client to reject a message. It can be used to interrupt and cancel large incoming messages, or return untreatable messages to their original queue. - :param int delivery-tag: The server-assigned delivery tag + :param integer delivery-tag: int/long The server-assigned delivery tag :param bool requeue: If requeue is true, the server will attempt to requeue the message. If requeue is false or the requeue attempt fails the messages are discarded or @@ -390,7 +477,7 @@ """ if not self.is_open: raise exceptions.ChannelClosed() - if not isinstance(delivery_tag, int): + if not is_integer(delivery_tag): raise TypeError('delivery_tag must be an integer') return self._send_method(spec.Basic.Reject(delivery_tag, requeue)) @@ -399,7 +486,8 @@ on a specified channel. Zero or more messages may be redelivered. This method replaces the asynchronous Recover. 
- :param method callback: Method to call when receiving Basic.RecoverOk + :param callable callback: Callback to call when receiving + Basic.RecoverOk :param bool requeue: If False, the message will be redelivered to the original recipient. If True, the server will attempt to requeue the message, potentially then @@ -410,21 +498,44 @@ return self._rpc(spec.Basic.Recover(requeue), callback, [spec.Basic.RecoverOk]) - def close(self, reply_code=0, reply_text="Normal Shutdown"): - """Will invoke a clean shutdown of the channel with the AMQP Broker. + def close(self, reply_code=0, reply_text="Normal shutdown"): + """Invoke a graceful shutdown of the channel with the AMQP Broker. - :param int reply_code: The reply code to close the channel with - :param str reply_text: The reply text to close the channel with + If channel is OPENING, transition to CLOSING and suppress the incoming + Channel.OpenOk, if any. + :param int reply_code: The reason code to send to broker + :param str reply_text: The reason text to send to broker + + :raises ChannelClosed: if channel is already closed + :raises ChannelAlreadyClosing: if channel is already closing """ - if not self.is_open: - raise exceptions.ChannelClosed() - LOGGER.info('Channel.close(%s, %s)', reply_code, reply_text) - if self._consumers: - LOGGER.debug('Cancelling %i consumers', len(self._consumers)) - for consumer_tag in dictkeys(self._consumers): - self.basic_cancel(consumer_tag=consumer_tag) + if self.is_closed: + # Whoever is calling `close` might expect the on-channel-close-cb + # to be called, which won't happen when it's already closed + raise exceptions.ChannelClosed('Already closed: %s' % self) + + if self.is_closing: + # Whoever is calling `close` might expect their reply_code and + # reply_text to be sent to broker, which won't happen if we're + # already closing. 
+ raise exceptions.ChannelAlreadyClosing('Already closing: %s' % self) + + # If channel is OPENING, we will transition it to CLOSING state, + # causing the _on_openok method to suppress the OPEN state transition + # and the on-channel-open-callback + + LOGGER.info('Closing channel (%s): %r on %s', + reply_code, reply_text, self) + + for consumer_tag in dictkeys(self._consumers): + if consumer_tag not in self._cancelled: + self.basic_cancel(consumer_tag=consumer_tag, nowait=True) + + # Change state after cancelling consumers to avoid ChannelClosed + # exception from basic_cancel self._set_state(self.CLOSING) + self._rpc(spec.Channel.Close(reply_code, reply_text, 0, 0), self._on_closeok, [spec.Channel.CloseOk]) @@ -436,13 +547,20 @@ For more information see: http://www.rabbitmq.com/extensions.html#confirms - :param method callback: The callback for delivery confirmations + :param callable callback: The callback for delivery confirmations that + has the following signature: callback(pika.frame.Method), where + method_frame contains either method `spec.Basic.Ack` or + `spec.Basic.Nack`. :param bool nowait: Do not send a reply frame (Confirm.SelectOk) """ self._validate_channel_and_callback(callback) - if (self.connection.publisher_confirms is False or - self.connection.basic_nack is False): + + # TODO confirm_delivery should require a callback; it's meaningless + # without a user callback to receive Basic.Ack/Basic.Nack notifications + + if not (self.connection.publisher_confirms and + self.connection.basic_nack): raise exceptions.MethodNotImplemented('Not Supported on Server') # Add the ack and nack callbacks @@ -453,7 +571,8 @@ False) # Send the RPC command - self._rpc(spec.Confirm.Select(nowait), self._on_selectok, + self._rpc(spec.Confirm.Select(nowait), + self._on_selectok if not nowait else None, [spec.Confirm.SelectOk] if nowait is False else []) @property @@ -474,7 +593,8 @@ arguments=None): """Bind an exchange to another exchange. 
- :param method callback: The method to call on Exchange.BindOk + :param callable callback: The callback to call on Exchange.BindOk; MUST + be None when nowait=True :param destination: The destination exchange to bind :type destination: str or unicode :param source: The source exchange to bind to @@ -500,8 +620,7 @@ auto_delete=False, internal=False, nowait=False, - arguments=None, - type=None): + arguments=None): """This method creates an exchange if it does not already exist, and if the exchange exists, verifies that it is of the correct and expected class. @@ -511,7 +630,8 @@ exchange does not already exist, the server MUST raise a channel exception with reply code 404 (not found). - :param method callback: Call this method on Exchange.DeclareOk + :param callable callback: Call this method on Exchange.DeclareOk; MUST + be None when nowait=True :param exchange: The exchange name consists of a non-empty :type exchange: str or unicode sequence of these characters: letters, @@ -524,19 +644,15 @@ :param bool internal: Can only be published to by other exchanges :param bool nowait: Do not expect an Exchange.DeclareOk response :param dict arguments: Custom key/value pair arguments for the exchange - :param str type: The deprecated exchange type parameter """ self._validate_channel_and_callback(callback) - if type is not None: - warnings.warn('type is deprecated, use exchange_type instead', - DeprecationWarning) - if exchange_type == 'direct' and type != exchange_type: - exchange_type = type + return self._rpc(spec.Exchange.Declare(0, exchange, exchange_type, passive, durable, auto_delete, internal, nowait, - arguments or dict()), callback, + arguments or dict()), + callback, [spec.Exchange.DeclareOk] if nowait is False else []) def exchange_delete(self, @@ -546,7 +662,8 @@ nowait=False): """Delete the exchange. 
- :param method callback: The method to call on Exchange.DeleteOk + :param callable callback: The function to call on Exchange.DeleteOk; + MUST be None when nowait=True. :param exchange: The exchange name :type exchange: str or unicode :param bool if_unused: only delete if the exchange is unused @@ -567,7 +684,8 @@ arguments=None): """Unbind an exchange from another exchange. - :param method callback: The method to call on Exchange.UnbindOk + :param callable callback: The callback to call on Exchange.UnbindOk; + MUST be None when nowait=True. :param destination: The destination exchange to unbind :type destination: str or unicode :param source: The source exchange to unbind from @@ -581,8 +699,8 @@ self._validate_channel_and_callback(callback) return self._rpc(spec.Exchange.Unbind(0, destination, source, routing_key, nowait, arguments), - callback, [spec.Exchange.UnbindOk] if nowait is False - else []) + callback, + [spec.Exchange.UnbindOk] if nowait is False else []) def flow(self, callback, active): """Turn Channel flow control off and on. Pass a callback to be notified @@ -592,7 +710,7 @@ http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow - :param method callback: The callback method + :param callable callback: The callback to call upon completion :param bool active: Turn flow on or off """ @@ -612,7 +730,8 @@ @property def is_closing(self): - """Returns True if the channel is closing. + """Returns True if client-initiated closing of the channel is in + progress. :rtype: bool @@ -640,7 +759,8 @@ arguments=None): """Bind the queue to the specified exchange - :param method callback: The method to call on Queue.BindOk + :param callable callback: The callback to call on Queue.BindOk; + MUST be None when nowait=True. 
:param queue: The queue to bind to the exchange :type queue: str or unicode :param exchange: The source exchange to bind to @@ -656,8 +776,8 @@ if routing_key is None: routing_key = queue return self._rpc(spec.Queue.Bind(0, queue, exchange, routing_key, - nowait, arguments or dict()), callback, - replies) + nowait, arguments or dict()), + callback, replies) def queue_declare(self, callback, queue='', @@ -674,7 +794,8 @@ Leave the queue name empty for a auto-named queue in RabbitMQ - :param method callback: The method to call on Queue.DeclareOk + :param callable callback: callback(pika.frame.Method) for method + Queue.DeclareOk; MUST be None when nowait=True. :param queue: The queue name :type queue: str or unicode :param bool passive: Only check to see if the queue exists @@ -689,13 +810,13 @@ condition = (spec.Queue.DeclareOk, {'queue': queue}) else: - condition = spec.Queue.DeclareOk + condition = spec.Queue.DeclareOk # pylint: disable=R0204 replies = [condition] if nowait is False else [] self._validate_channel_and_callback(callback) return self._rpc(spec.Queue.Declare(0, queue, passive, durable, exclusive, auto_delete, nowait, - arguments or dict()), callback, - replies) + arguments or dict()), + callback, replies) def queue_delete(self, callback=None, @@ -705,7 +826,8 @@ nowait=False): """Delete a queue from the broker. - :param method callback: The method to call on Queue.DeleteOk + :param callable callback: The callback to call on Queue.DeleteOk; + MUST be None when nowait=True. 
:param queue: The queue to delete :type queue: str or unicode :param bool if_unused: only delete if it's unused @@ -716,12 +838,14 @@ replies = [spec.Queue.DeleteOk] if nowait is False else [] self._validate_channel_and_callback(callback) return self._rpc(spec.Queue.Delete(0, queue, if_unused, if_empty, - nowait), callback, replies) + nowait), + callback, replies) def queue_purge(self, callback=None, queue='', nowait=False): """Purge all of the messages from the specified queue - :param method callback: The method to call on Queue.PurgeOk + :param callable callback: The callback to call on Queue.PurgeOk; + MUST be None when nowait=True. :param queue: The queue to purge :type queue: str or unicode :param bool nowait: Do not expect a Queue.PurgeOk response @@ -739,7 +863,7 @@ arguments=None): """Unbind a queue from an exchange. - :param method callback: The method to call on Queue.UnbindOk + :param callable callback: The callback to call on Queue.UnbindOk :param queue: The queue to unbind from the exchange :type queue: str or unicode :param exchange: The source exchange to bind from @@ -753,13 +877,13 @@ if routing_key is None: routing_key = queue return self._rpc(spec.Queue.Unbind(0, queue, exchange, routing_key, - arguments or dict()), callback, - [spec.Queue.UnbindOk]) + arguments or dict()), + callback, [spec.Queue.UnbindOk]) def tx_commit(self, callback=None): """Commit a transaction - :param method callback: The callback for delivery confirmations + :param callable callback: The callback for delivery confirmations """ self._validate_channel_and_callback(callback) @@ -768,7 +892,7 @@ def tx_rollback(self, callback=None): """Rollback a transaction. - :param method callback: The callback for delivery confirmations + :param callable callback: The callback for delivery confirmations """ self._validate_channel_and_callback(callback) @@ -779,7 +903,7 @@ standard transactions. 
The client must use this method at least once on a channel before using the Commit or Rollback methods. - :param method callback: The callback for delivery confirmations + :param callable callback: The callback for delivery confirmations """ self._validate_channel_and_callback(callback) @@ -814,26 +938,13 @@ be called when the channel is being cleaned up after all channel-close callbacks callbacks. - :param method callback: The method to call on callback with the + :param callable callback: The callback to call, having the signature: callback(channel) """ self.callbacks.add(self.channel_number, self._ON_CHANNEL_CLEANUP_CB_KEY, callback, one_shot=True, only_caller=self) - def _add_pending_msg(self, consumer_tag, method_frame, header_frame, body): - """Add the received message to the pending message stack. - - :param str consumer_tag: The consumer tag for the message - :param pika.frame.Method method_frame: The received method frame - :param pika.frame.Header header_frame: The received header frame - :param body: The message body - :type body: str or unicode - - """ - self._pending[consumer_tag].append((self, method_frame.method, - header_frame.properties, body)) - def _cleanup(self): """Remove all consumers and any callbacks for the channel.""" self.callbacks.process(self.channel_number, @@ -850,12 +961,8 @@ :param str consumer_tag: The consumer tag to cleanup """ - if consumer_tag in self._consumers_with_noack: - self._consumers_with_noack.remove(consumer_tag) - if consumer_tag in self._consumers: - del self._consumers[consumer_tag] - if consumer_tag in self._pending: - del self._pending[consumer_tag] + self._consumers_with_noack.discard(consumer_tag) + self._consumers.pop(consumer_tag, None) self._cancelled.discard(consumer_tag) def _get_cookie(self): @@ -866,30 +973,23 @@ """ return self._cookie - def _get_pending_msg(self, consumer_tag): - """Get a pending message for the consumer tag from the stack. 
- - :param str consumer_tag: The consumer tag to get a message from - :rtype: tuple(pika.frame.Header, pika.frame.Method, str|unicode) - - """ - return self._pending[consumer_tag].pop(0) - def _handle_content_frame(self, frame_value): """This is invoked by the connection when frames that are not registered with the CallbackManager have been found. This should only be the case when the frames are related to content delivery. - The frame_dispatcher will be invoked which will return the fully formed - message in three parts when all of the body frames have been received. + The _content_assembler will be invoked which will return the fully + formed message in three parts when all of the body frames have been + received. :param pika.amqp_object.Frame frame_value: The frame to deliver """ try: - response = self.frame_dispatcher.process(frame_value) + response = self._content_assembler.process(frame_value) except exceptions.UnexpectedFrameError: - return self._unexpected_frame(frame_value) + self._on_unexpected_frame(frame_value) + return if response: if isinstance(response[0].method, spec.Basic.Deliver): @@ -899,14 +999,6 @@ elif isinstance(response[0].method, spec.Basic.Return): self._on_return(*response) - def _has_content(self, method_frame): - """Return a bool if it's a content method as defined by the spec - - :param pika.amqp_object.Method method_frame: The method frame received - - """ - return spec.has_content(method_frame.INDEX) - def _on_cancel(self, method_frame): """When the broker cancels a consumer, delete it from our internal dictionary. 
@@ -932,31 +1024,80 @@ def _on_close(self, method_frame): """Handle the case where our channel has been closed for us - :param pika.frame.Method method_frame: The close frame + :param pika.frame.Method method_frame: Method frame with Channel.Close + method """ - LOGGER.info('%s', method_frame) - LOGGER.warning('Received remote Channel.Close (%s): %s', + LOGGER.warning('Received remote Channel.Close (%s): %r on %s', method_frame.method.reply_code, - method_frame.method.reply_text) - if self.connection.is_open: - self._send_method(spec.Channel.CloseOk()) - self._set_state(self.CLOSED) - self.callbacks.process(self.channel_number, '_on_channel_close', self, - self, method_frame.method.reply_code, - method_frame.method.reply_text) - self._cleanup() + method_frame.method.reply_text, + self) + + # AMQP 0.9.1 requires CloseOk response to Channel.Close; Note, we should + # not be called when connection is closed + self._send_method(spec.Channel.CloseOk()) + + if self.is_closing: + # Since we already sent Channel.Close, we need to wait for CloseOk + # before cleaning up to avoid a race condition whereby our channel + # number might get reused before our CloseOk arrives + + # Save the details to provide to user callback when CloseOk arrives + self._closing_code_and_text = (method_frame.method.reply_code, + method_frame.method.reply_text) + else: + self._set_state(self.CLOSED) + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + method_frame.method.reply_code, + method_frame.method.reply_text) + finally: + self._cleanup() + + def _on_close_meta(self, reply_code, reply_text): + """Handle meta-close request from Connection's cleanup logic after + sudden connection loss. We use this opportunity to transition to + CLOSED state, clean up the channel, and dispatch the on-channel-closed + callbacks. 
+ + :param int reply_code: The reply code to pass to on-close callback + :param str reply_text: The reply text to pass to on-close callback + + """ + LOGGER.debug('Handling meta-close on %s', self) + + if not self.is_closed: + self._closing_code_and_text = reply_code, reply_text + + self._set_state(self.CLOSED) + + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + reply_code, + reply_text) + finally: + self._cleanup() def _on_closeok(self, method_frame): """Invoked when RabbitMQ replies to a Channel.Close method - :param pika.frame.Method method_frame: The CloseOk frame + :param pika.frame.Method method_frame: Method frame with Channel.CloseOk + method """ + LOGGER.info('Received %s on %s', method_frame.method, self) + self._set_state(self.CLOSED) - self.callbacks.process(self.channel_number, '_on_channel_close', self, - self, 0, '') - self._cleanup() + + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + self._closing_code_and_text[0], + self._closing_code_and_text[1]) + finally: + self._cleanup() def _on_deliver(self, method_frame, header_frame, body): """Cope with reentrancy. 
If a particular consumer is still active when @@ -970,15 +1111,16 @@ """ consumer_tag = method_frame.method.consumer_tag + if consumer_tag in self._cancelled: if self.is_open and consumer_tag not in self._consumers_with_noack: self.basic_reject(method_frame.method.delivery_tag) return + if consumer_tag not in self._consumers: - return self._add_pending_msg(consumer_tag, method_frame, - header_frame, body) - while self._pending[consumer_tag]: - self._consumers[consumer_tag](*self._get_pending_msg(consumer_tag)) + LOGGER.error('Unexpected delivery: %r', method_frame) + return + self._consumers[consumer_tag](self, method_frame.method, header_frame.properties, body) @@ -992,7 +1134,7 @@ """ LOGGER.debug('Discarding frame %r', method_frame) - def _on_flow(self, method_frame_unused): + def _on_flow(self, _method_frame_unused): """Called if the server sends a Channel.Flow frame. :param pika.frame.Method method_frame_unused: The Channel.Flow frame @@ -1021,6 +1163,8 @@ """ LOGGER.debug('Received Basic.GetEmpty: %r', method_frame) + if self._on_getok_callback is not None: + self._on_getok_callback = None def _on_getok(self, method_frame, header_frame, body): """Called in reply to a Basic.Get when there is a message. @@ -1038,19 +1182,27 @@ else: LOGGER.error('Basic.GetOk received with no active callback') - def _on_openok(self, frame_unused): + def _on_openok(self, method_frame): """Called by our callback handler when we receive a Channel.OpenOk and subsequently calls our _on_openok_callback which was passed into the Channel constructor. The reason we do this is because we want to make sure that the on_open_callback parameter passed into the Channel constructor is not the first callback we make. - :param pika.frame.Method frame_unused: Unused Channel.OpenOk frame + Suppress the state transition and callback if channel is already in + CLOSING state. 
+ + :param pika.frame.Method method_frame: Channel.OpenOk frame """ - self._set_state(self.OPEN) - if self._on_openok_callback is not None: - self._on_openok_callback(self) + # Suppress OpenOk if the user or Connection.Close started closing it + # before open completed. + if self.is_closing: + LOGGER.debug('Suppressing while in closing state: %s', method_frame) + else: + self._set_state(self.OPEN) + if self._on_openok_callback is not None: + self._on_openok_callback(self) def _on_return(self, method_frame, header_frame, body): """Called if the server sends a Basic.Return frame. @@ -1077,7 +1229,7 @@ """ LOGGER.debug("Confirm.SelectOk Received: %r", method_frame) - def _on_synchronous_complete(self, method_frame_unused): + def _on_synchronous_complete(self, _method_frame_unused): """This is called when a synchronous command is completed. It will undo the blocking state and send all the frames that stacked up while we were in the blocking state. @@ -1087,67 +1239,93 @@ """ LOGGER.debug('%i blocked frames', len(self._blocked)) self._blocking = None - while len(self._blocked) > 0 and self._blocking is None: + while self._blocked and self._blocking is None: self._rpc(*self._blocked.popleft()) - def _rpc(self, method_frame, callback=None, acceptable_replies=None): - """Shortcut wrapper to the Connection's rpc command using its callback - stack, passing in our channel number. + def _rpc(self, method, callback=None, acceptable_replies=None): + """Make a synchronous channel RPC call for a synchronous method frame. If + the channel is already in the blocking state, then enqueue the request, + but don't send it at this time; it will be eventually sent by + `_on_synchronous_complete` after the prior blocking request receives a + response. If the channel is not in the blocking state and + `acceptable_replies` is not empty, transition the channel to the + blocking state and register for `_on_synchronous_complete` before + sending the request. 
+ + NOTE: A callback must be accompanied by non-empty acceptable_replies. + + :param pika.amqp_object.Method method: The AMQP method to invoke + :param callable callback: The callback for the RPC response + :param acceptable_replies: A (possibly empty) sequence of + replies this RPC call expects or None + :type acceptable_replies: list or None + + """ + assert method.synchronous, ( + 'Only synchronous-capable methods may be used with _rpc: %r' + % (method,)) - :param pika.amqp_object.Method method_frame: The method frame to call - :param method callback: The callback for the RPC response - :param list acceptable_replies: The replies this RPC call expects + # Validate we got None or a list of acceptable_replies + if not isinstance(acceptable_replies, (type(None), list)): + raise TypeError('acceptable_replies should be list or None') - """ - # Make sure the channel is open + if callback is not None: + # Validate the callback is callable + if not is_callable(callback): + raise TypeError( + 'callback should be None or a callable') + + # Make sure that callback is accompanied by acceptable replies + if not acceptable_replies: + raise ValueError( + 'Unexpected callback for asynchronous (nowait) operation.') + + # Make sure the channel is not closed yet if self.is_closed: raise exceptions.ChannelClosed # If the channel is blocking, add subsequent commands to our stack if self._blocking: - return self._blocked.append([method_frame, callback, - acceptable_replies]) - - # Validate we got None or a list of acceptable_replies - if acceptable_replies and not isinstance(acceptable_replies, list): - raise TypeError("acceptable_replies should be list or None") - - # Validate the callback is callable - if callback and not is_callable(callback): - raise TypeError("callback should be None, a function or method.") - - # Block until a response frame is received for synchronous frames - if method_frame.synchronous: - self._blocking = method_frame.NAME + LOGGER.debug('Already in blocking 
state, so enqueueing method %s; ' + 'acceptable_replies=%r', + method, acceptable_replies) + return self._blocked.append([method, callback, acceptable_replies]) # If acceptable replies are set, add callbacks if acceptable_replies: - for reply in acceptable_replies or list(): + # Block until a response frame is received for synchronous frames + self._blocking = method.NAME + LOGGER.debug( + 'Entering blocking state on frame %s; acceptable_replies=%r', + method, acceptable_replies) + + for reply in acceptable_replies: if isinstance(reply, tuple): reply, arguments = reply else: arguments = None - LOGGER.debug('Adding in on_synchronous_complete callback') + LOGGER.debug('Adding on_synchronous_complete callback') self.callbacks.add(self.channel_number, reply, self._on_synchronous_complete, arguments=arguments) - if callback: - LOGGER.debug('Adding passed in callback') + if callback is not None: + LOGGER.debug('Adding passed-in callback') self.callbacks.add(self.channel_number, reply, callback, arguments=arguments) - self._send_method(method_frame) + self._send_method(method) - def _send_method(self, method_frame, content=None): + def _send_method(self, method, content=None): """Shortcut wrapper to send a method through our connection, passing in the channel number - :param pika.object.Method method_frame: The method frame to send + :param pika.amqp_object.Method method: The method to send :param tuple content: If set, is a content frame, is tuple of properties and body. """ - self.connection._send_method(self.channel_number, method_frame, content) + # pylint: disable=W0212 + self.connection._send_method(self.channel_number, method, content) def _set_cookie(self, cookie): """Used by wrapper layer (e.g., `BlockingConnection`) to link the @@ -1166,30 +1344,34 @@ """ self._state = connection_state - def _unexpected_frame(self, frame_value): + def _on_unexpected_frame(self, frame_value): """Invoked when a frame is received that is not setup to be processed. 
:param pika.frame.Frame frame_value: The frame received """ - LOGGER.warning('Unexpected frame: %r', frame_value) + LOGGER.error('Unexpected frame: %r', frame_value) def _validate_channel_and_callback(self, callback): + """Verify that channel is open and callback is callable if not None + + :raises ChannelClosed: if channel is closed + :raises ValueError: if callback is not None and is not callable + """ if not self.is_open: raise exceptions.ChannelClosed() if callback is not None and not is_callable(callback): raise ValueError('callback must be a function or method') -class ContentFrameDispatcher(object): +class ContentFrameAssembler(object): """Handle content related frames, building a message and return the message back in three parts upon receipt. """ def __init__(self): - """Create a new instance of the Dispatcher passing in the callback - manager. + """Create a new instance of the content frame assembler. """ self._method_frame = None @@ -1206,7 +1388,7 @@ """ if (isinstance(frame_value, frame.Method) and - spec.has_content(frame_value.method.INDEX)): + spec.has_content(frame_value.method.INDEX)): self._method_frame = frame_value elif isinstance(frame_value, frame.Header): self._header_frame = frame_value diff -Nru python-pika-0.10.0/pika/compat.py python-pika-0.11.0/pika/compat.py --- python-pika-0.10.0/pika/compat.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/compat.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,13 +1,14 @@ +import os import sys as _sys - PY2 = _sys.version_info < (3,) PY3 = not PY2 if not PY2: # these were moved around for Python 3 - from urllib.parse import unquote as url_unquote, urlencode + from urllib.parse import (quote as url_quote, unquote as url_unquote, + urlencode) # Python 3 does not have basestring anymore; we include # *only* the str here as this is used for textual data. 
@@ -46,6 +47,24 @@ """ return list(dct.values()) + def dict_iteritems(dct): + """ + Returns an iterator of items (key/value pairs) of a dictionary + + dict.items returns a view that works like .items in Python 2 + *except* any modifications in the dictionary will be visible + (and will cause errors if the view is being iterated over while + it is modified). + """ + return dct.items() + + def dict_itervalues(dct): + """ + :param dict dct: + :returns: an iterator of the values of a dictionary + """ + return dct.values() + def byte(*args): """ This is the same as Python 2 `chr(n)` for bytes in Python 3 @@ -73,8 +92,10 @@ return str(value) + def is_integer(value): + return isinstance(value, int) else: - from urllib import unquote as url_unquote, urlencode + from urllib import quote as url_quote, unquote as url_unquote, urlencode basestring = basestring str_or_bytes = basestring @@ -82,6 +103,8 @@ unicode_type = unicode dictkeys = dict.keys dictvalues = dict.values + dict_iteritems = dict.iteritems + dict_itervalues = dict.itervalues byte = chr long = long @@ -97,9 +120,15 @@ except UnicodeEncodeError: return str(value.encode('utf-8')) + def is_integer(value): + return isinstance(value, (int, long)) def as_bytes(value): if not isinstance(value, bytes): return value.encode('UTF-8') return value + +HAVE_SIGNAL = os.name == 'posix' + +EINTR_IS_EXPOSED = _sys.version_info[:2] <= (3,4) diff -Nru python-pika-0.10.0/pika/connection.py python-pika-0.11.0/pika/connection.py --- python-pika-0.10.0/pika/connection.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/connection.py 2017-08-29 16:54:39.000000000 +0000 @@ -2,21 +2,21 @@ import ast import sys import collections +import copy import logging import math +import numbers import platform -import threading -import urllib import warnings if sys.version_info > (3,): - import urllib.parse as urlparse + import urllib.parse as urlparse # pylint: disable=E0611,F0401 else: import urlparse from pika import __version__ 
from pika import callback -from pika import channel +import pika.channel from pika import credentials as pika_credentials from pika import exceptions from pika import frame @@ -25,7 +25,9 @@ from pika import spec -from pika.compat import basestring, url_unquote, dictkeys +from pika.compat import (xrange, basestring, # pylint: disable=W0622 + url_unquote, dictkeys, dict_itervalues, + dict_iteritems) BACKPRESSURE_WARNING = ("Pika: Write buffer exceeded warning threshold at " @@ -35,60 +37,138 @@ LOGGER = logging.getLogger(__name__) -class Parameters(object): +class InternalCloseReasons(object): + """Internal reason codes passed to the user's on_close_callback when the + connection is terminated abruptly, without reply code/text from the broker. + + AMQP 0.9.1 specification cites IETF RFC 821 for reply codes. To avoid + conflict, the `InternalCloseReasons` namespace uses negative integers. These + are invalid for sending to the broker. + """ + SOCKET_ERROR = -1 + BLOCKED_CONNECTION_TIMEOUT = -2 + + +class Parameters(object): # pylint: disable=R0902 """Base connection parameters class definition - :param str DEFAULT_HOST: 'localhost' - :param int DEFAULT_PORT: 5672 - :param str DEFAULT_VIRTUAL_HOST: '/' - :param str DEFAULT_USERNAME: 'guest' - :param str DEFAULT_PASSWORD: 'guest' - :param int DEFAULT_HEARTBEAT_INTERVAL: None - :param int DEFAULT_CHANNEL_MAX: 0 - :param int DEFAULT_FRAME_MAX: pika.spec.FRAME_MAX_SIZE - :param str DEFAULT_LOCALE: 'en_US' - :param int DEFAULT_CONNECTION_ATTEMPTS: 1 - :param int|float DEFAULT_RETRY_DELAY: 2.0 - :param int|float DEFAULT_SOCKET_TIMEOUT: 0.25 - :param bool DEFAULT_SSL: False - :param dict DEFAULT_SSL_OPTIONS: {} - :param int DEFAULT_SSL_PORT: 5671 - :param bool DEFAULT_BACKPRESSURE_DETECTION: False + :param bool backpressure_detection: `DEFAULT_BACKPRESSURE_DETECTION` + :param float|None blocked_connection_timeout: + `DEFAULT_BLOCKED_CONNECTION_TIMEOUT` + :param int channel_max: `DEFAULT_CHANNEL_MAX` + :param int 
connection_attempts: `DEFAULT_CONNECTION_ATTEMPTS` + :param credentials: `DEFAULT_CREDENTIALS` + :param int frame_max: `DEFAULT_FRAME_MAX` + :param int heartbeat: `DEFAULT_HEARTBEAT_TIMEOUT` + :param str host: `DEFAULT_HOST` + :param str locale: `DEFAULT_LOCALE` + :param int port: `DEFAULT_PORT` + :param float retry_delay: `DEFAULT_RETRY_DELAY` + :param float socket_timeout: `DEFAULT_SOCKET_TIMEOUT` + :param bool ssl: `DEFAULT_SSL` + :param dict ssl_options: `DEFAULT_SSL_OPTIONS` + :param str virtual_host: `DEFAULT_VIRTUAL_HOST` """ + + # Declare slots to protect against accidental assignment of an invalid + # attribute + __slots__ = ( + '_backpressure_detection', + '_blocked_connection_timeout', + '_channel_max', + '_client_properties', + '_connection_attempts', + '_credentials', + '_frame_max', + '_heartbeat', + '_host', + '_locale', + '_port', + '_retry_delay', + '_socket_timeout', + '_ssl', + '_ssl_options', + '_virtual_host' + ) + + DEFAULT_USERNAME = 'guest' + DEFAULT_PASSWORD = 'guest' + DEFAULT_BACKPRESSURE_DETECTION = False + DEFAULT_BLOCKED_CONNECTION_TIMEOUT = None + DEFAULT_CHANNEL_MAX = pika.channel.MAX_CHANNELS + DEFAULT_CLIENT_PROPERTIES = None + DEFAULT_CREDENTIALS = pika_credentials.PlainCredentials(DEFAULT_USERNAME, + DEFAULT_PASSWORD) DEFAULT_CONNECTION_ATTEMPTS = 1 - DEFAULT_CHANNEL_MAX = 0 DEFAULT_FRAME_MAX = spec.FRAME_MAX_SIZE - DEFAULT_HEARTBEAT_INTERVAL = None # accept server's proposal + DEFAULT_HEARTBEAT_TIMEOUT = None # None accepts server's proposal DEFAULT_HOST = 'localhost' DEFAULT_LOCALE = 'en_US' - DEFAULT_PASSWORD = 'guest' DEFAULT_PORT = 5672 DEFAULT_RETRY_DELAY = 2.0 DEFAULT_SOCKET_TIMEOUT = 0.25 DEFAULT_SSL = False - DEFAULT_SSL_OPTIONS = {} + DEFAULT_SSL_OPTIONS = None DEFAULT_SSL_PORT = 5671 - DEFAULT_USERNAME = 'guest' DEFAULT_VIRTUAL_HOST = '/' + DEFAULT_HEARTBEAT_INTERVAL = DEFAULT_HEARTBEAT_TIMEOUT # DEPRECATED + def __init__(self): - self.virtual_host = self.DEFAULT_VIRTUAL_HOST + self._backpressure_detection = None 
self.backpressure_detection = self.DEFAULT_BACKPRESSURE_DETECTION + + # If not None, blocked_connection_timeout is the timeout, in seconds, + # for the connection to remain blocked; if the timeout expires, the + # connection will be torn down, triggering the connection's + # on_close_callback + self._blocked_connection_timeout = None + self.blocked_connection_timeout = ( + self.DEFAULT_BLOCKED_CONNECTION_TIMEOUT) + + self._channel_max = None self.channel_max = self.DEFAULT_CHANNEL_MAX + + self._client_properties = None + self.client_properties = self.DEFAULT_CLIENT_PROPERTIES + + self._connection_attempts = None self.connection_attempts = self.DEFAULT_CONNECTION_ATTEMPTS - self.credentials = self._credentials(self.DEFAULT_USERNAME, - self.DEFAULT_PASSWORD) + + self._credentials = None + self.credentials = self.DEFAULT_CREDENTIALS + + self._frame_max = None self.frame_max = self.DEFAULT_FRAME_MAX - self.heartbeat = self.DEFAULT_HEARTBEAT_INTERVAL + + self._heartbeat = None + self.heartbeat = self.DEFAULT_HEARTBEAT_TIMEOUT + + self._host = None self.host = self.DEFAULT_HOST + + self._locale = None self.locale = self.DEFAULT_LOCALE + + self._port = None self.port = self.DEFAULT_PORT + + self._retry_delay = None self.retry_delay = self.DEFAULT_RETRY_DELAY + + self._socket_timeout = None + self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT + + self._ssl = None self.ssl = self.DEFAULT_SSL + + self._ssl_options = None self.ssl_options = self.DEFAULT_SSL_OPTIONS - self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT + + self._virtual_host = None + self.virtual_host = self.DEFAULT_VIRTUAL_HOST def __repr__(self): """Represent the info about the instance. @@ -100,244 +180,399 @@ (self.__class__.__name__, self.host, self.port, self.virtual_host, self.ssl)) - def _credentials(self, username, password): - """Return a plain credentials object for the specified username and - password. 
+ @property + def backpressure_detection(self): + """ + :returns: boolean indicating whether backpressure detection is + enabled. Defaults to `DEFAULT_BACKPRESSURE_DETECTION`. - :param str username: The username to use - :param str password: The password to use - :rtype: pika_credentials.PlainCredentials + """ + return self._backpressure_detection + @backpressure_detection.setter + def backpressure_detection(self, value): """ - return pika_credentials.PlainCredentials(username, password) + :param bool value: boolean indicating whether to enable backpressure + detection - def _validate_backpressure(self, backpressure_detection): - """Validate that the backpressure detection option is a bool. + """ + if not isinstance(value, bool): + raise TypeError('backpressure_detection must be a bool, ' + 'but got %r' % (value,)) + self._backpressure_detection = value - :param bool backpressure_detection: The backpressure detection value - :rtype: bool - :raises: TypeError + @property + def blocked_connection_timeout(self): + """ + :returns: None or float blocked connection timeout. Defaults to + `DEFAULT_BLOCKED_CONNECTION_TIMEOUT`. 
+ + """ + return self._blocked_connection_timeout + @blocked_connection_timeout.setter + def blocked_connection_timeout(self, value): """ - if not isinstance(backpressure_detection, bool): - raise TypeError('backpressure detection must be a bool') - return True + :param value: If not None, blocked_connection_timeout is the timeout, in + seconds, for the connection to remain blocked; if the timeout + expires, the connection will be torn down, triggering the + connection's on_close_callback - def _validate_channel_max(self, channel_max): - """Validate that the channel_max value is an int + """ + if value is not None: + if not isinstance(value, numbers.Real): + raise TypeError('blocked_connection_timeout must be a Real ' + 'number, but got %r' % (value,)) + if value < 0: + raise ValueError('blocked_connection_timeout must be >= 0, but ' + 'got %r' % (value,)) + self._blocked_connection_timeout = value - :param int channel_max: The value to validate - :rtype: bool - :raises: TypeError - :raises: ValueError + @property + def channel_max(self): + """ + :returns: max preferred number of channels. Defaults to + `DEFAULT_CHANNEL_MAX`. 
+ :rtype: int """ - if not isinstance(channel_max, int): - raise TypeError('channel_max must be an int') - if channel_max < 1 or channel_max > 65535: - raise ValueError('channel_max must be <= 65535 and > 0') - return True + return self._channel_max - def _validate_connection_attempts(self, connection_attempts): - """Validate that the connection_attempts value is an int + @channel_max.setter + def channel_max(self, value): + """ + :param int value: max preferred number of channels, between 1 and + `channel.MAX_CHANNELS`, inclusive - :param int connection_attempts: The value to validate - :rtype: bool - :raises: TypeError - :raises: ValueError + """ + if not isinstance(value, numbers.Integral): + raise TypeError('channel_max must be an int, but got %r' % (value,)) + if value < 1 or value > pika.channel.MAX_CHANNELS: + raise ValueError('channel_max must be <= %i and > 0, but got %r' % + (pika.channel.MAX_CHANNELS, value)) + self._channel_max = value + + @property + def client_properties(self): + """ + :returns: None or dict of client properties used to override the fields + in the default client poperties reported to RabbitMQ via + `Connection.StartOk` method. Defaults to + `DEFAULT_CLIENT_PROPERTIES`. + + """ + return self._client_properties + + @client_properties.setter + def client_properties(self, value): + """ + :param value: None or dict of client properties used to override the + fields in the default client poperties reported to RabbitMQ via + `Connection.StartOk` method. + """ + if not isinstance(value, (dict, type(None),)): + raise TypeError('client_properties must be dict or None, ' + 'but got %r' % (value,)) + # Copy the mutable object to avoid accidental side-effects + self._client_properties = copy.deepcopy(value) + + @property + def connection_attempts(self): + """ + :returns: number of socket connection attempts. Defaults to + `DEFAULT_CONNECTION_ATTEMPTS`. 
+ + """ + return self._connection_attempts + @connection_attempts.setter + def connection_attempts(self, value): """ - if not isinstance(connection_attempts, int): + :param int value: number of socket connection attempts of at least 1 + + """ + if not isinstance(value, numbers.Integral): raise TypeError('connection_attempts must be an int') - if connection_attempts < 1: - raise ValueError('connection_attempts must be None or > 0') - return True + if value < 1: + raise ValueError('connection_attempts must be > 0, but got %r' % + (value,)) + self._connection_attempts = value - def _validate_credentials(self, credentials): - """Validate the credentials passed in are using a valid object type. + @property + def credentials(self): + """ + :rtype: one of the classes from `pika.credentials.VALID_TYPES`. Defaults + to `DEFAULT_CREDENTIALS`. - :param pika.credentials.Credentials credentials: Credentials to validate - :rtype: bool - :raises: TypeError + """ + return self._credentials + @credentials.setter + def credentials(self, value): """ - for credential_type in pika_credentials.VALID_TYPES: - if isinstance(credentials, credential_type): - return True - raise TypeError('Credentials must be an object of type: %r' % - pika_credentials.VALID_TYPES) + :param value: authentication credential object of one of the classes + from `pika.credentials.VALID_TYPES` - def _validate_frame_max(self, frame_max): - """Validate that the frame_max value is an int and does not exceed - the maximum frame size and is not less than the frame min size. 
+ """ + if not isinstance(value, tuple(pika_credentials.VALID_TYPES)): + raise TypeError('Credentials must be an object of type: %r, but ' + 'got %r' % (pika_credentials.VALID_TYPES, value)) + # Copy the mutable object to avoid accidental side-effects + self._credentials = copy.deepcopy(value) - :param int frame_max: The value to validate - :rtype: bool - :raises: TypeError - :raises: InvalidMinimumFrameSize + @property + def frame_max(self): + """ + :returns: desired maximum AMQP frame size to use. Defaults to + `DEFAULT_FRAME_MAX`. """ - if not isinstance(frame_max, int): - raise TypeError('frame_max must be an int') - if frame_max < spec.FRAME_MIN_SIZE: - raise exceptions.InvalidMinimumFrameSize - elif frame_max > spec.FRAME_MAX_SIZE: - raise exceptions.InvalidMaximumFrameSize - return True + return self._frame_max - def _validate_heartbeat_interval(self, heartbeat_interval): - """Validate that the heartbeat_interval value is an int + @frame_max.setter + def frame_max(self, value): + """ + :param int value: desired maximum AMQP frame size to use between + `spec.FRAME_MIN_SIZE` and `spec.FRAME_MAX_SIZE`, inclusive - :param int heartbeat_interval: The value to validate - :rtype: bool - :raises: TypeError - :raises: ValueError + """ + if not isinstance(value, numbers.Integral): + raise TypeError('frame_max must be an int, but got %r' % (value,)) + if value < spec.FRAME_MIN_SIZE: + raise ValueError('Min AMQP 0.9.1 Frame Size is %i, but got %r', + (spec.FRAME_MIN_SIZE, value,)) + elif value > spec.FRAME_MAX_SIZE: + raise ValueError('Max AMQP 0.9.1 Frame Size is %i, but got %r', + (spec.FRAME_MAX_SIZE, value,)) + self._frame_max = value + @property + def heartbeat(self): """ - if not isinstance(heartbeat_interval, int): - raise TypeError('heartbeat must be an int') - if heartbeat_interval < 0: - raise ValueError('heartbeat_interval must >= 0') - return True + :returns: desired connection heartbeat timeout for negotiation or + None to accept broker's value. 
0 turns heartbeat off. Defaults to + `DEFAULT_HEARTBEAT_TIMEOUT`. + :rtype: integer, float, or None - def _validate_host(self, host): - """Validate that the host value is an str + """ + return self._heartbeat - :param str|unicode host: The value to validate - :rtype: bool - :raises: TypeError + @heartbeat.setter + def heartbeat(self, value): + """ + :param value: desired connection heartbeat timeout for negotiation or + None to accept broker's value. 0 turns heartbeat off. """ - if not isinstance(host, basestring): - raise TypeError('host must be a str or unicode str') - return True + if value is not None: + if not isinstance(value, numbers.Integral): + raise TypeError('heartbeat must be an int, but got %r' % + (value,)) + if value < 0: + raise ValueError('heartbeat must >= 0, but got %r' % (value,)) + self._heartbeat = value - def _validate_locale(self, locale): - """Validate that the locale value is an str + @property + def host(self): + """ + :returns: hostname or ip address of broker. Defaults to `DEFAULT_HOST`. + :rtype: str - :param str locale: The value to validate - :rtype: bool - :raises: TypeError + """ + return self._host + @host.setter + def host(self, value): """ - if not isinstance(locale, basestring): - raise TypeError('locale must be a str') - return True + :param str value: hostname or ip address of broker - def _validate_port(self, port): - """Validate that the port value is an int + """ + if not isinstance(value, basestring): + raise TypeError('host must be a str or unicode str, but got %r' % + (value,)) + self._host = value - :param int port: The value to validate - :rtype: bool - :raises: TypeError + @property + def locale(self): + """ + :returns: locale value to pass to broker; e.g., 'en_US'. Defaults to + `DEFAULT_LOCALE`. 
+ :rtype: str """ - if not isinstance(port, int): - raise TypeError('port must be an int') - return True + return self._locale - def _validate_retry_delay(self, retry_delay): - """Validate that the retry_delay value is an int or float + @locale.setter + def locale(self, value): + """ + :param str value: locale value to pass to broker; e.g., "en_US" - :param int|float retry_delay: The value to validate - :rtype: bool - :raises: TypeError + """ + if not isinstance(value, basestring): + raise TypeError('locale must be a str, but got %r' % (value,)) + self._locale = value + @property + def port(self): """ - if not any([isinstance(retry_delay, int), - isinstance(retry_delay, float)]): - raise TypeError('retry_delay must be a float or int') - return True + :returns: port number of broker's listening socket. Defaults to + `DEFAULT_PORT`. + :rtype: int - def _validate_socket_timeout(self, socket_timeout): - """Validate that the socket_timeout value is an int or float + """ + return self._port - :param int|float socket_timeout: The value to validate - :rtype: bool - :raises: TypeError + @port.setter + def port(self, value): + """ + :param int value: port number of broker's listening socket + + """ + if not isinstance(value, numbers.Integral): + raise TypeError('port must be an int, but got %r' % (value,)) + self._port = value + @property + def retry_delay(self): """ - if not any([isinstance(socket_timeout, int), - isinstance(socket_timeout, float)]): - raise TypeError('socket_timeout must be a float or int') - if not socket_timeout > 0: - raise ValueError('socket_timeout must be > 0') - return True + :returns: interval between socket connection attempts; see also + `connection_attempts`. Defaults to `DEFAULT_RETRY_DELAY`. 
+ :rtype: float - def _validate_ssl(self, ssl): - """Validate the SSL toggle is a bool + """ + return self._retry_delay - :param bool ssl: The SSL enabled/disabled value - :rtype: bool - :raises: TypeError + @retry_delay.setter + def retry_delay(self, value): + """ + :param float value: interval between socket connection attempts; see + also `connection_attempts`. """ - if not isinstance(ssl, bool): - raise TypeError('ssl must be a bool') - return True + if not isinstance(value, numbers.Real): + raise TypeError('retry_delay must be a float or int, but got %r' % + (value,)) + self._retry_delay = value - def _validate_ssl_options(self, ssl_options): - """Validate the SSL options value is a dictionary. + @property + def socket_timeout(self): + """ + :returns: socket timeout value. Defaults to `DEFAULT_SOCKET_TIMEOUT`. + :rtype: float - :param dict|None ssl_options: SSL Options to validate - :rtype: bool - :raises: TypeError + """ + return self._socket_timeout + @socket_timeout.setter + def socket_timeout(self, value): """ - if not isinstance(ssl_options, dict) and ssl_options is not None: - raise TypeError('ssl_options must be either None or dict') - return True + :param float value: socket timeout value; NOTE: this is mostly unused + now, owing to switchover to to non-blocking socket setting after + initial socket connection establishment. - def _validate_virtual_host(self, virtual_host): - """Validate that the virtual_host value is an str + """ + if value is not None: + if not isinstance(value, numbers.Real): + raise TypeError('socket_timeout must be a float or int, ' + 'but got %r' % (value,)) + if not value > 0: + raise ValueError('socket_timeout must be > 0, but got %r' % + (value,)) + self._socket_timeout = value - :param str virtual_host: The value to validate - :rtype: bool - :raises: TypeError + @property + def ssl(self): + """ + :returns: boolean indicating whether to connect via SSL. Defaults to + `DEFAULT_SSL`. 
+ + """ + return self._ssl + + @ssl.setter + def ssl(self, value): + """ + :param bool value: boolean indicating whether to connect via SSL + + """ + if not isinstance(value, bool): + raise TypeError('ssl must be a bool, but got %r' % (value,)) + self._ssl = value + + @property + def ssl_options(self): + """ + :returns: None or a dict of options to pass to `ssl.wrap_socket`. + Defaults to `DEFAULT_SSL_OPTIONS`. + + """ + return self._ssl_options + + @ssl_options.setter + def ssl_options(self, value): + """ + :param value: None or a dict of options to pass to `ssl.wrap_socket`. + + """ + if not isinstance(value, (dict, type(None))): + raise TypeError('ssl_options must be a dict or None, but got %r' % + (value,)) + # Copy the mutable object to avoid accidental side-effects + self._ssl_options = copy.deepcopy(value) + + @property + def virtual_host(self): + """ + :returns: rabbitmq virtual host name. Defaults to + `DEFAULT_VIRTUAL_HOST`. + + """ + return self._virtual_host + @virtual_host.setter + def virtual_host(self, value): """ - if not isinstance(virtual_host, basestring): - raise TypeError('virtual_host must be a str') - return True + :param str value: rabbitmq virtual host name + + """ + if not isinstance(value, basestring): + raise TypeError('virtual_host must be a str, but got %r' % (value,)) + self._virtual_host = value class ConnectionParameters(Parameters): """Connection parameters object that is passed into the connection adapter upon construction. 
- :param str host: Hostname or IP Address to connect to - :param int port: TCP port to connect to - :param str virtual_host: RabbitMQ virtual host to use - :param pika.credentials.Credentials credentials: auth credentials - :param int channel_max: Maximum number of channels to allow - :param int frame_max: The maximum byte size for an AMQP frame - :param int heartbeat_interval: How often to send heartbeats - :param bool ssl: Enable SSL - :param dict ssl_options: Arguments passed to ssl.wrap_socket as - :param int connection_attempts: Maximum number of retry attempts - :param int|float retry_delay: Time to wait in seconds, before the next - :param int|float socket_timeout: Use for high latency networks - :param str locale: Set the locale value - :param bool backpressure_detection: Toggle backpressure detection - """ - def __init__(self, - host=None, - port=None, - virtual_host=None, - credentials=None, - channel_max=None, - frame_max=None, - heartbeat_interval=None, - ssl=None, - ssl_options=None, - connection_attempts=None, - retry_delay=None, - socket_timeout=None, - locale=None, - backpressure_detection=None): - """Create a new ConnectionParameters instance. + # Protect against accidental assignment of an invalid attribute + __slots__ = () + + class _DEFAULT(object): + """Designates default parameter value; internal use""" + pass + + def __init__(self, # pylint: disable=R0913,R0914,R0912 + host=_DEFAULT, + port=_DEFAULT, + virtual_host=_DEFAULT, + credentials=_DEFAULT, + channel_max=_DEFAULT, + frame_max=_DEFAULT, + heartbeat=_DEFAULT, + ssl=_DEFAULT, + ssl_options=_DEFAULT, + connection_attempts=_DEFAULT, + retry_delay=_DEFAULT, + socket_timeout=_DEFAULT, + locale=_DEFAULT, + backpressure_detection=_DEFAULT, + blocked_connection_timeout=_DEFAULT, + client_properties=_DEFAULT, + **kwargs): + """Create a new ConnectionParameters instance. See `Parameters` for + default values. 
:param str host: Hostname or IP Address to connect to :param int port: TCP port to connect to @@ -345,59 +580,103 @@ :param pika.credentials.Credentials credentials: auth credentials :param int channel_max: Maximum number of channels to allow :param int frame_max: The maximum byte size for an AMQP frame - :param int heartbeat_interval: How often to send heartbeats. - Min between this value and server's proposal - will be used. Use 0 to deactivate heartbeats - and None to accept server's proposal. + :param int heartbeat: Heartbeat timeout. Max between this value + and server's proposal will be used as the heartbeat timeout. Use 0 + to deactivate heartbeats and None to accept server's proposal. :param bool ssl: Enable SSL - :param dict ssl_options: Arguments passed to ssl.wrap_socket + :param dict ssl_options: None or a dict of arguments to be passed to + ssl.wrap_socket :param int connection_attempts: Maximum number of retry attempts :param int|float retry_delay: Time to wait in seconds, before the next :param int|float socket_timeout: Use for high latency networks :param str locale: Set the locale value - :param bool backpressure_detection: Toggle backpressure detection + :param bool backpressure_detection: DEPRECATED in favor of + `Connection.Blocked` and `Connection.Unblocked`. See + `Connection.add_on_connection_blocked_callback`. + :param blocked_connection_timeout: If not None, + the value is a non-negative timeout, in seconds, for the + connection to remain blocked (triggered by Connection.Blocked from + broker); if the timeout expires before connection becomes unblocked, + the connection will be torn down, triggering the adapter-specific + mechanism for informing client app about the closed connection ( + e.g., on_close_callback or ConnectionClosed exception) with + `reason_code` of `InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`. 
+ :type blocked_connection_timeout: None, int, float + :param client_properties: None or dict of client properties used to + override the fields in the default client properties reported to + RabbitMQ via `Connection.StartOk` method. + :param heartbeat_interval: DEPRECATED; use `heartbeat` instead, and + don't pass both """ super(ConnectionParameters, self).__init__() - # Create the default credentials object - if not credentials: - credentials = self._credentials(self.DEFAULT_USERNAME, - self.DEFAULT_PASSWORD) + if backpressure_detection is not self._DEFAULT: + self.backpressure_detection = backpressure_detection - # Assign the values - if host and self._validate_host(host): - self.host = host - if port is not None and self._validate_port(port): - self.port = port - if virtual_host and self._validate_virtual_host(virtual_host): - self.virtual_host = virtual_host - if credentials and self._validate_credentials(credentials): - self.credentials = credentials - if channel_max is not None and self._validate_channel_max(channel_max): + if blocked_connection_timeout is not self._DEFAULT: + self.blocked_connection_timeout = blocked_connection_timeout + + if channel_max is not self._DEFAULT: self.channel_max = channel_max - if frame_max is not None and self._validate_frame_max(frame_max): + + if client_properties is not self._DEFAULT: + self.client_properties = client_properties + + if connection_attempts is not self._DEFAULT: + self.connection_attempts = connection_attempts + + if credentials is not self._DEFAULT: + self.credentials = credentials + + if frame_max is not self._DEFAULT: self.frame_max = frame_max - if locale and self._validate_locale(locale): - self.locale = locale - if (heartbeat_interval is not None and - self._validate_heartbeat_interval(heartbeat_interval)): + + if heartbeat is not self._DEFAULT: + self.heartbeat = heartbeat + + try: + heartbeat_interval = kwargs.pop('heartbeat_interval') + except KeyError: + # Good, this one is deprecated + pass + 
else: + warnings.warn('heartbeat_interval is deprecated, use heartbeat', + DeprecationWarning, stacklevel=2) + if heartbeat is not self._DEFAULT: + raise TypeError('heartbeat and deprecated heartbeat_interval ' + 'are mutually-exclusive') self.heartbeat = heartbeat_interval - if ssl is not None and self._validate_ssl(ssl): - self.ssl = ssl - if ssl_options and self._validate_ssl_options(ssl_options): - self.ssl_options = ssl_options or dict() - if (connection_attempts is not None and - self._validate_connection_attempts(connection_attempts)): - self.connection_attempts = connection_attempts - if retry_delay is not None and self._validate_retry_delay(retry_delay): + + if host is not self._DEFAULT: + self.host = host + + if locale is not self._DEFAULT: + self.locale = locale + + if retry_delay is not self._DEFAULT: self.retry_delay = retry_delay - if (socket_timeout is not None and - self._validate_socket_timeout(socket_timeout)): + + if socket_timeout is not self._DEFAULT: self.socket_timeout = socket_timeout - if (backpressure_detection is not None and - self._validate_backpressure(backpressure_detection)): - self.backpressure_detection = backpressure_detection + + if ssl is not self._DEFAULT: + self.ssl = ssl + + if ssl_options is not self._DEFAULT: + self.ssl_options = ssl_options + + # Set port after SSL status is known + if port is not self._DEFAULT: + self.port = port + elif ssl is not self._DEFAULT: + self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT + + if virtual_host is not self._DEFAULT: + self.virtual_host = virtual_host + + if kwargs: + raise TypeError('Unexpected kwargs: %r' % (kwargs,)) class URLParameters(Parameters): @@ -408,17 +687,25 @@ Ensure that the virtual host is URI encoded when specified. For example if you are using the default "/" virtual host, the value should be `%2f`. + See `Parameters` for default values. 
+ Valid query string values are: - backpressure_detection: - Toggle backpressure detection, possible values are `t` or `f` + DEPRECATED in favor of + `Connection.Blocked` and `Connection.Unblocked`. See + `Connection.add_on_connection_blocked_callback`. - channel_max: Override the default maximum channel count value + - client_properties: + dict of client properties used to override the fields in the default + client properties reported to RabbitMQ via `Connection.StartOk` + method - connection_attempts: Specify how many times pika should try and reconnect before it gives up - frame_max: Override the default maximum frame size for communication - - heartbeat_interval: + - heartbeat: Specify the number of seconds between heartbeat frames to ensure that the link between RabbitMQ and your application is up - locale: @@ -432,11 +719,24 @@ connection failure. - socket_timeout: Override low level socket timeout value + - blocked_connection_timeout: + Set the timeout, in seconds, that the connection may remain blocked + (triggered by Connection.Blocked from broker); if the timeout + expires before connection becomes unblocked, the connection will be + torn down, triggering the connection's on_close_callback :param str url: The AMQP URL to connect to """ + # Protect against accidental assignment of an invalid attribute + __slots__ = ('_all_url_query_values',) + + + # The name of the private function for parsing and setting a given URL query + # arg is constructed by catenating the query arg's name to this prefix + _SETTER_PREFIX = '_set_url_' + def __init__(self, url): """Create a new URLParameters instance. @@ -444,100 +744,165 @@ """ super(URLParameters, self).__init__() - self._process_url(url) - - def _process_url(self, url): - """Take an AMQP URL and break it up into the various parameters. 
- :param str url: The URL to parse + self._all_url_query_values = None - """ - if url[0:4] == 'amqp': + # Handle the Protocol scheme + # + # Fix up scheme amqp(s) to http(s) so urlparse won't barf on python + # prior to 2.7. On Python 2.6.9, + # `urlparse('amqp://127.0.0.1/%2f?socket_timeout=1')` produces an + # incorrect path='/%2f?socket_timeout=1' + if url[0:4].lower() == 'amqp': url = 'http' + url[4:] + # TODO Is support for the alternative http(s) schemes intentional? + parts = urlparse.urlparse(url) - # Handle the Protocol scheme, changing to HTTPS so urlparse doesnt barf if parts.scheme == 'https': self.ssl = True + elif parts.scheme == 'http': + self.ssl = False + elif parts.scheme: + raise ValueError('Unexpected URL scheme %r; supported scheme ' + 'values: amqp, amqps' % (parts.scheme,)) - if self._validate_host(parts.hostname): + if parts.hostname is not None: self.host = parts.hostname - if not parts.port: - if self.ssl: - self.port = self.DEFAULT_SSL_PORT if \ - self.ssl else self.DEFAULT_PORT - elif self._validate_port(parts.port): + + # Take care of port after SSL status is known + if parts.port is not None: self.port = parts.port + else: + self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT if parts.username is not None: - self.credentials = pika_credentials.PlainCredentials(parts.username, - parts.password) + self.credentials = pika_credentials.PlainCredentials(url_unquote(parts.username), + url_unquote(parts.password)) # Get the Virtual Host - if len(parts.path) <= 1: - self.virtual_host = self.DEFAULT_VIRTUAL_HOST - else: - path_parts = parts.path.split('/') - virtual_host = url_unquote(path_parts[1]) - if self._validate_virtual_host(virtual_host): - self.virtual_host = virtual_host + if len(parts.path) > 1: + self.virtual_host = url_unquote(parts.path.split('/')[1]) # Handle query string values, validating and assigning them - values = urlparse.parse_qs(parts.query) - # Cast the various numeric values to the appropriate values - 
for key in dictkeys(values): - # Always reassign the first list item in query values - values[key] = values[key].pop(0) - if values[key].isdigit(): - values[key] = int(values[key]) - else: - try: - values[key] = float(values[key]) - except ValueError: - pass - - if 'backpressure_detection' in values: - if values['backpressure_detection'] == 't': - self.backpressure_detection = True - elif values['backpressure_detection'] == 'f': - self.backpressure_detection = False - else: - raise ValueError('Invalid backpressure_detection value: %s' % - values['backpressure_detection']) + self._all_url_query_values = urlparse.parse_qs(parts.query) + + for name, value in dict_iteritems(self._all_url_query_values): + try: + set_value = getattr(self, self._SETTER_PREFIX + name) + except AttributeError: + raise ValueError('Unknown URL parameter: %r' % (name,)) + + try: + (value,) = value + except ValueError: + raise ValueError('Expected exactly one value for URL parameter ' + '%s, but got %i values: %s' % ( + name, len(value), value)) + + set_value(value) - if ('channel_max' in values and - self._validate_channel_max(values['channel_max'])): - self.channel_max = values['channel_max'] - - if ('connection_attempts' in values and - self._validate_connection_attempts(values['connection_attempts'])): - self.connection_attempts = values['connection_attempts'] - - if ('frame_max' in values and - self._validate_frame_max(values['frame_max'])): - self.frame_max = values['frame_max'] - - if ('heartbeat_interval' in values and - self._validate_heartbeat_interval(values['heartbeat_interval'])): - self.heartbeat = values['heartbeat_interval'] - - if ('locale' in values and self._validate_locale(values['locale'])): - self.locale = values['locale'] - - if ('retry_delay' in values and - self._validate_retry_delay(values['retry_delay'])): - self.retry_delay = values['retry_delay'] - - if ('socket_timeout' in values and - self._validate_socket_timeout(values['socket_timeout'])): - self.socket_timeout 
= values['socket_timeout'] - - if 'ssl_options' in values: - options = ast.literal_eval(values['ssl_options']) - if self._validate_ssl_options(options): - self.ssl_options = options + def _set_url_backpressure_detection(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + backpressure_detection = {'t': True, 'f': False}[value] + except KeyError: + raise ValueError('Invalid backpressure_detection value: %r' % + (value,)) + self.backpressure_detection = backpressure_detection + + def _set_url_blocked_connection_timeout(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + blocked_connection_timeout = float(value) + except ValueError as exc: + raise ValueError('Invalid blocked_connection_timeout value %r: %r' % + (value, exc,)) + self.blocked_connection_timeout = blocked_connection_timeout + + def _set_url_channel_max(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + channel_max = int(value) + except ValueError as exc: + raise ValueError('Invalid channel_max value %r: %r' % (value, exc,)) + self.channel_max = channel_max + + def _set_url_client_properties(self, value): + """Deserialize and apply the corresponding query string arg""" + self.client_properties = ast.literal_eval(value) + + def _set_url_connection_attempts(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + connection_attempts = int(value) + except ValueError as exc: + raise ValueError('Invalid connection_attempts value %r: %r' % + (value, exc,)) + self.connection_attempts = connection_attempts + + def _set_url_frame_max(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + frame_max = int(value) + except ValueError as exc: + raise ValueError('Invalid frame_max value %r: %r' % (value, exc,)) + self.frame_max = frame_max + + def _set_url_heartbeat(self, value): + """Deserialize and apply the corresponding query string arg""" + if 
'heartbeat_interval' in self._all_url_query_values: + raise ValueError('Deprecated URL parameter heartbeat_interval must ' + 'not be specified together with heartbeat') + + try: + heartbeat_timeout = int(value) + except ValueError as exc: + raise ValueError('Invalid heartbeat value %r: %r' % (value, exc,)) + self.heartbeat = heartbeat_timeout + + def _set_url_heartbeat_interval(self, value): + """Deserialize and apply the corresponding query string arg""" + warnings.warn('heartbeat_interval is deprecated, use heartbeat', + DeprecationWarning, stacklevel=2) + + if 'heartbeat' in self._all_url_query_values: + raise ValueError('Deprecated URL parameter heartbeat_interval must ' + 'not be specified together with heartbeat') + + try: + heartbeat_timeout = int(value) + except ValueError as exc: + raise ValueError('Invalid heartbeat_interval value %r: %r' % + (value, exc,)) + self.heartbeat = heartbeat_timeout + + def _set_url_locale(self, value): + """Deserialize and apply the corresponding query string arg""" + self.locale = value + + def _set_url_retry_delay(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + retry_delay = float(value) + except ValueError as exc: + raise ValueError('Invalid retry_delay value %r: %r' % (value, exc,)) + self.retry_delay = retry_delay + + def _set_url_socket_timeout(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + socket_timeout = float(value) + except ValueError as exc: + raise ValueError('Invalid socket_timeout value %r: %r' % + (value, exc,)) + self.socket_timeout = socket_timeout + + def _set_url_ssl_options(self, value): + """Deserialize and apply the corresponding query string arg""" + self.ssl_options = ast.literal_eval(value) class Connection(object): @@ -552,6 +917,10 @@ :param method on_close_callback: Called when the connection is closed """ + + # Disable pylint messages concerning "method could be a funciton" + # pylint: disable=R0201 + 
ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure' ON_CONNECTION_BLOCKED = '_on_connection_blocked' ON_CONNECTION_CLOSED = '_on_connection_closed' @@ -564,7 +933,17 @@ CONNECTION_START = 3 CONNECTION_TUNE = 4 CONNECTION_OPEN = 5 - CONNECTION_CLOSING = 6 + CONNECTION_CLOSING = 6 # client-initiated close in progress + + _STATE_NAMES = { + CONNECTION_CLOSED: 'CLOSED', + CONNECTION_INIT: 'INIT', + CONNECTION_PROTOCOL: 'PROTOCOL', + CONNECTION_START: 'START', + CONNECTION_TUNE: 'TUNE', + CONNECTION_OPEN: 'OPEN', + CONNECTION_CLOSING: 'CLOSING' + } def __init__(self, parameters=None, @@ -580,23 +959,52 @@ :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Called when the connection is opened - :param method on_open_error_callback: Called if the connection cant - be opened - :param method on_close_callback: Called when the connection is closed + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + `on_close_callback(connection, reason_code, reason_text)`, where + `reason_code` is either an IETF RFC 821 reply code for AMQP-level + closures or a value from `pika.connection.InternalCloseReasons` for + internal causes, such as socket errors. 
""" - self._write_lock = threading.Lock() + self.connection_state = self.CONNECTION_CLOSED + + # Holds timer when the initial connect or reconnect is scheduled + self._connection_attempt_timer = None + + # Used to hold timer if configured for Connection.Blocked timeout + self._blocked_conn_timer = None + + self.heartbeat = None + + # Set our configuration options + self.params = (copy.deepcopy(parameters) if parameters is not None else + ConnectionParameters()) # Define our callback dictionary self.callbacks = callback.CallbackManager() + # Attributes that will be properly initialized by _init_connection_state + # and/or during connection handshake. + self.server_capabilities = None + self.server_properties = None + self._body_max_length = None + self.known_hosts = None + self.closing = None + self._frame_buffer = None + self._channels = None + self._backpressure_multiplier = None + self.remaining_connection_attempts = None + + self._init_connection_state() + + # Add the on connection error callback self.callbacks.add(0, self.ON_CONNECTION_ERROR, on_open_error_callback or self._on_connection_error, False) - self.heartbeat = None - # On connection callback if on_open_callback: self.add_on_open_callback(on_open_callback) @@ -605,11 +1013,6 @@ if on_close_callback: self.add_on_close_callback(on_close_callback) - # Set our configuration options - self.params = parameters or ConnectionParameters() - - # Initialize the connection state and connect - self._init_connection_state() self.connect() def add_backpressure_callback(self, callback_method): @@ -639,7 +1042,12 @@ instead of relying on back pressure throttling. The callback will be passed the ``Connection.Blocked`` method frame. - :param method callback_method: Callback to call on `Connection.Blocked` + See also `ConnectionParameters.blocked_connection_timeout`. 
+ + :param method callback_method: Callback to call on `Connection.Blocked`, + having the signature `callback_method(pika.frame.Method)`, where the + method frame's `method` member is of type + `pika.spec.Connection.Blocked` """ self.callbacks.add(0, spec.Connection.Blocked, callback_method, False) @@ -651,7 +1059,9 @@ ``Connection.Unblocked`` method frame. :param method callback_method: Callback to call on - `Connection.Unblocked` + `Connection.Unblocked`, having the signature + `callback_method(pika.frame.Method)`, where the method frame's + `method` member is of type `pika.spec.Connection.Unblocked` """ self.callbacks.add(0, spec.Connection.Unblocked, callback_method, False) @@ -702,6 +1112,11 @@ :rtype: pika.channel.Channel """ + if not self.is_open: + # TODO if state is OPENING, then ConnectionClosed might be wrong + raise exceptions.ConnectionClosed( + 'Channel allocation requires an open connection: %s' % self) + if not channel_number: channel_number = self._next_channel_number() self._channels[channel_number] = self._create_channel(channel_number, @@ -721,48 +1136,54 @@ """ if self.is_closing or self.is_closed: + LOGGER.warning('Suppressing close request on %s', self) return - if self._has_open_channels: - self._close_channels(reply_code, reply_text) + # Initiate graceful closing of channels that are OPEN or OPENING + self._close_channels(reply_code, reply_text) # Set our connection state self._set_connection_state(self.CONNECTION_CLOSING) LOGGER.info("Closing connection (%s): %s", reply_code, reply_text) self.closing = reply_code, reply_text - if not self._has_open_channels: - # if there are open channels then _on_close_ready will finally be - # called in _on_channel_cleanup once all channels have been closed + # If there are channels that haven't finished closing yet, then + # _on_close_ready will finally be called from _on_channel_cleanup once + # all channels have been closed + if not self._channels: + # We can initiate graceful closing of the 
connection right away, + # since no more channels remain self._on_close_ready() + else: + LOGGER.info('Connection.close is waiting for ' + '%d channels to close: %s', len(self._channels), self) def connect(self): """Invoke if trying to reconnect to a RabbitMQ server. Constructing the Connection object should connect on its own. """ + assert self._connection_attempt_timer is None, ( + 'connect timer was already scheduled') + + assert self.is_closed, ( + 'connect expected CLOSED state, but got: {}'.format( + self._STATE_NAMES[self.connection_state])) + self._set_connection_state(self.CONNECTION_INIT) - error = self._adapter_connect() - if not error: - return self._on_connected() - self.remaining_connection_attempts -= 1 - LOGGER.warning('Could not connect, %i attempts left', - self.remaining_connection_attempts) - if self.remaining_connection_attempts: - LOGGER.info('Retrying in %i seconds', self.params.retry_delay) - self.add_timeout(self.params.retry_delay, self.connect) - else: - self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self, - error) - self.remaining_connection_attempts = self.params.connection_attempts - self._set_connection_state(self.CONNECTION_CLOSED) - def remove_timeout(self, callback_method): - """Adapters should override to call the callback after the - specified number of seconds have elapsed, using a timer, or a - thread, or similar. + # Schedule a timer callback to start the actual connection logic from + # event loop's context, thus avoiding error callbacks in the context of + # the caller, which could be the constructor. 
+ self._connection_attempt_timer = self.add_timeout( + 0, + self._on_connect_timer) - :param method callback_method: The callback to remove a timeout for + + def remove_timeout(self, timeout_id): + """Adapters should override: Remove a timeout + + :param str timeout_id: The timeout id to remove """ raise NotImplementedError @@ -775,7 +1196,7 @@ :param int value: The multiplier value to set """ - self._backpressure = value + self._backpressure_multiplier = value # # Connections state properties @@ -791,7 +1212,8 @@ @property def is_closing(self): """ - Returns a boolean reporting the current connection state. + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. """ return self.connection_state == self.CONNECTION_CLOSING @@ -871,6 +1293,8 @@ :param int channel_number: The channel number for the callbacks """ + # pylint: disable=W0212 + # This permits us to garbage-collect our reference to the channel # regardless of whether it was closed by client or broker, and do so # after all channel-close callbacks. @@ -915,7 +1339,9 @@ """ if (value.method.version_major, - value.method.version_minor) != spec.PROTOCOL_VERSION[0:2]: + value.method.version_minor) != spec.PROTOCOL_VERSION[0:2]: + # TODO This should call _on_terminate for proper callbacks and + # cleanup raise exceptions.ProtocolVersionMismatch(frame.ProtocolHeader(), value) @@ -926,7 +1352,7 @@ :rtype: dict """ - return { + properties = { 'product': PRODUCT, 'platform': 'Python %s' % platform.python_version(), 'capabilities': { @@ -940,36 +1366,36 @@ 'version': __version__ } + if self.params.client_properties: + properties.update(self.params.client_properties) + + return properties + def _close_channels(self, reply_code, reply_text): - """Close the open channels with the specified reply_code and reply_text. + """Initiate graceful closing of channels that are in OPEN or OPENING + states, passing reply_code and reply_text. 
:param int reply_code: The code for why the channels are being closed :param str reply_text: The text reason for why the channels are closing """ - if self.is_open: - for channel_number in dictkeys(self._channels): - if self._channels[channel_number].is_open: - self._channels[channel_number].close(reply_code, reply_text) - else: - del self._channels[channel_number] - # Force any lingering callbacks to be removed - # moved inside else block since channel's _cleanup removes - # callbacks - self.callbacks.cleanup(channel_number) - else: - self._channels = dict() + assert self.is_open, str(self) - def _combine(self, a, b): + for channel_number in dictkeys(self._channels): + chan = self._channels[channel_number] + if not (chan.is_closing or chan.is_closed): + chan.close(reply_code, reply_text) + + def _combine(self, val1, val2): """Pass in two values, if a is 0, return b otherwise if b is 0, return a. If neither case matches return the smallest value. - :param int a: The first value - :param int b: The second value + :param int val1: The first value + :param int val2: The second value :rtype: int """ - return min(a, b) or (a or b) + return min(val1, val2) or (val1 or val2) def _connect(self): """Attempt to connect to RabbitMQ @@ -989,7 +1415,7 @@ """ LOGGER.debug('Creating channel %s', channel_number) - return channel.Channel(self, channel_number, on_open_callback) + return pika.channel.Channel(self, channel_number, on_open_callback) def _create_heartbeat_checker(self): """Create a heartbeat checker instance if there is a heartbeat interval @@ -1018,14 +1444,15 @@ """ if not value.channel_number in self._channels: - if self._is_basic_deliver_frame(value): - self._reject_out_of_band_delivery(value.channel_number, - value.method.delivery_tag) - else: - LOGGER.warning("Received %r for non-existing channel %i", value, - value.channel_number) + # This should never happen and would constitute breach of the + # protocol + LOGGER.critical( + 'Received %s frame for unregistered 
channel %i on %s', + value.NAME, value.channel_number, self) return - return self._channels[value.channel_number]._handle_content_frame(value) + + # pylint: disable=W0212 + self._channels[value.channel_number]._handle_content_frame(value) def _detect_backpressure(self): """Attempt to calculate if TCP backpressure is being applied due to @@ -1034,8 +1461,8 @@ """ avg_frame_size = self.bytes_sent / self.frames_sent - buffer_size = sum([len(frame) for frame in self.outbound_buffer]) - if buffer_size > (avg_frame_size * self._backpressure): + buffer_size = sum([len(f) for f in self.outbound_buffer]) + if buffer_size > (avg_frame_size * self._backpressure_multiplier): LOGGER.warning(BACKPRESSURE_WARNING, buffer_size, int(buffer_size / avg_frame_size)) self.callbacks.process(0, self.ON_CONNECTION_BACKPRESSURE, self) @@ -1074,20 +1501,12 @@ (auth_type, response) = self.params.credentials.response_for(method_frame.method) if not auth_type: + # TODO this should call _on_terminate for proper callbacks and + # cleanup instead raise exceptions.AuthenticationError(self.params.credentials.TYPE) self.params.credentials.erase_credentials() return auth_type, response - @property - def _has_open_channels(self): - """Returns true if channels are open. - - :rtype: bool - - """ - return any([self._channels[num].is_open - for num in dictkeys(self._channels)]) - def _has_pending_callbacks(self, value): """Return true if there are any callbacks pending for the specified frame. 
@@ -1130,7 +1549,7 @@ self.heartbeat = None # Default back-pressure multiplier value - self._backpressure = 10 + self._backpressure_multiplier = 10 # When closing, hold reason why self.closing = 0, 'Not specified' @@ -1138,25 +1557,29 @@ # Our starting point once connected, first frame received self._add_connection_start_callback() - def _is_basic_deliver_frame(self, frame_value): - """Returns true if the frame is a Basic.Deliver - - :param pika.frame.Method frame_value: The frame to check - :rtype: bool - - """ - return isinstance(frame_value, spec.Basic.Deliver) - - def _is_connection_close_frame(self, value): - """Returns true if the frame is a Connection.Close frame. - - :param pika.frame.Method value: The frame to check - :rtype: bool - - """ - if not value: - return False - return isinstance(value.method, spec.Connection.Close) + # Add a callback handler for the Broker telling us to disconnect. + # NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close + # to signal error during connection setup (and wait a longish time + # before closing the TCP/IP stream). Earlier RabbitMQ versions + # simply closed the TCP/IP stream. + self.callbacks.add(0, spec.Connection.Close, self._on_connection_close) + + if self._connection_attempt_timer is not None: + # Connection attempt timer was active when teardown was initiated + self.remove_timeout(self._connection_attempt_timer) + self._connection_attempt_timer = None + + if self.params.blocked_connection_timeout is not None: + if self._blocked_conn_timer is not None: + # Blocked connection timer was active when teardown was + # initiated + self.remove_timeout(self._blocked_conn_timer) + self._blocked_conn_timer = None + + self.add_on_connection_blocked_callback( + self._on_connection_blocked) + self.add_on_connection_unblocked_callback( + self._on_connection_unblocked) def _is_method_frame(self, value): """Returns true if the frame is a method frame. 
@@ -1181,14 +1604,14 @@ :rtype: int """ - limit = self.params.channel_max or channel.MAX_CHANNELS - if len(self._channels) == limit: + limit = self.params.channel_max or pika.channel.MAX_CHANNELS + if len(self._channels) >= limit: raise exceptions.NoFreeChannels() - ckeys = set(self._channels.keys()) - if not ckeys: - return 1 - return [x + 1 for x in sorted(ckeys) if x + 1 not in ckeys][0] + for num in xrange(1, len(self._channels) + 1): + if num not in self._channels: + return num + return len(self._channels) + 1 def _on_channel_cleanup(self, channel): """Remove the channel from the dict of channels when Channel.CloseOk is @@ -1204,8 +1627,21 @@ except KeyError: LOGGER.error('Channel %r not in channels', channel.channel_number) - if self.is_closing and not self._has_open_channels: - self._on_close_ready() + if self.is_closing: + if not self._channels: + # Initiate graceful closing of the connection + self._on_close_ready() + else: + # Once Connection enters CLOSING state, all remaining channels + # should also be in CLOSING state. Deviation from this would + # prevent Connection from completing its closing procedure. + channels_not_in_closing_state = [ + chan for chan in dict_itervalues(self._channels) + if not chan.is_closing] + if channels_not_in_closing_state: + LOGGER.critical( + 'Connection in CLOSING state has non-CLOSING ' + 'channels: %r', channels_not_in_closing_state) def _on_close_ready(self): """Called when the Connection is in a state that it can close after @@ -1214,8 +1650,9 @@ """ if self.is_closed: - LOGGER.warning('Invoked while already closed') + LOGGER.warning('_on_close_ready invoked when already closed') return + self._send_connection_close(self.closing[0], self.closing[1]) def _on_connected(self): @@ -1228,34 +1665,76 @@ # Start the communication with the RabbitMQ Broker self._send_frame(frame.ProtocolHeader()) - def _on_connection_closed(self, method_frame, from_adapter=False): - """Called when the connection is closed remotely. 
The from_adapter value - will be true if the connection adapter has been disconnected from - the broker and the method was invoked directly instead of by receiving - a Connection.Close frame. - - :param pika.frame.Method: The Connection.Close frame - :param bool from_adapter: Called by the connection adapter - - """ - if method_frame and self._is_connection_close_frame(method_frame): - self.closing = (method_frame.method.reply_code, - method_frame.method.reply_text) + def _on_blocked_connection_timeout(self): + """ Called when the "connection blocked timeout" expires. When this + happens, we tear down the connection - # Save the codes because self.closing gets reset by _adapter_disconnect - reply_code, reply_text = self.closing + """ + self._blocked_conn_timer = None + self._on_terminate(InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT, + 'Blocked connection timeout expired') - # Stop the heartbeat checker if it exists - self._remove_heartbeat() + def _on_connection_blocked(self, method_frame): + """Handle Connection.Blocked notification from RabbitMQ broker - # If this did not come from the connection adapter, close the socket - if not from_adapter: - self._adapter_disconnect() + :param pika.frame.Method method_frame: method frame having `method` + member of type `pika.spec.Connection.Blocked` + """ + LOGGER.warning('Received %s from broker', method_frame) - # Invoke a method frame neutral close - self._on_disconnect(reply_code, reply_text) + if self._blocked_conn_timer is not None: + # RabbitMQ is not supposed to repeat Connection.Blocked, but it + # doesn't hurt to be careful + LOGGER.warning('_blocked_conn_timer %s already set when ' + '_on_connection_blocked is called', + self._blocked_conn_timer) + else: + self._blocked_conn_timer = self.add_timeout( + self.params.blocked_connection_timeout, + self._on_blocked_connection_timeout) + + def _on_connection_unblocked(self, method_frame): + """Handle Connection.Unblocked notification from RabbitMQ broker + + :param 
pika.frame.Method method_frame: method frame having `method` + member of type `pika.spec.Connection.Blocked` + """ + LOGGER.info('Received %s from broker', method_frame) + + if self._blocked_conn_timer is None: + # RabbitMQ is supposed to pair Connection.Blocked/Unblocked, but it + # doesn't hurt to be careful + LOGGER.warning('_blocked_conn_timer was not active when ' + '_on_connection_unblocked called') + else: + self.remove_timeout(self._blocked_conn_timer) + self._blocked_conn_timer = None + + def _on_connection_close(self, method_frame): + """Called when the connection is closed remotely via Connection.Close + frame from broker. + + :param pika.frame.Method method_frame: The Connection.Close frame + + """ + LOGGER.debug('_on_connection_close: frame=%s', method_frame) - def _on_connection_error(self, connection_unused, error_message=None): + self.closing = (method_frame.method.reply_code, + method_frame.method.reply_text) + + self._on_terminate(self.closing[0], self.closing[1]) + + def _on_connection_close_ok(self, method_frame): + """Called when Connection.CloseOk is received from remote. + + :param pika.frame.Method method_frame: The Connection.CloseOk frame + + """ + LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame) + + self._on_terminate(self.closing[0], self.closing[1]) + + def _on_connection_error(self, _connection_unused, error_message=None): """Default behavior when the connecting connection can not connect. :raises: exceptions.AMQPConnectionError @@ -1270,10 +1749,11 @@ called the Connection.Open on the server and it has replied with Connection.Ok. """ - self.known_hosts = method_frame.method.known_hosts + # TODO _on_connection_open - what if user started closing it already? + # It shouldn't transition to OPEN if in closing state. Just log and skip + # the rest. 
- # Add a callback handler for the Broker telling us to disconnect - self.callbacks.add(0, spec.Connection.Close, self._on_connection_closed) + self.known_hosts = method_frame.method.known_hosts # We're now connected at the AMQP level self._set_connection_state(self.CONNECTION_OPEN) @@ -1297,6 +1777,87 @@ self._add_connection_tune_callback() self._send_connection_start_ok(*self._get_credentials(method_frame)) + def _on_connect_timer(self): + """Callback for self._connection_attempt_timer: initiate connection + attempt in the context of the event loop + + """ + self._connection_attempt_timer = None + + error = self._adapter_connect() + if not error: + return self._on_connected() + self.remaining_connection_attempts -= 1 + LOGGER.warning('Could not connect, %i attempts left', + self.remaining_connection_attempts) + if self.remaining_connection_attempts > 0: + LOGGER.info('Retrying in %i seconds', self.params.retry_delay) + self._connection_attempt_timer = self.add_timeout( + self.params.retry_delay, + self._on_connect_timer) + else: + # TODO connect must not call failure callback from constructor. The + # current behavior is error-prone, because the user code may get a + # callback upon socket connection failure before user's other state + # may be sufficiently initialized. Constructors must either succeed + # or raise an exception. To be forward-compatible with failure + # reporting from fully non-blocking connection establishment, + # connect() should set INIT state and schedule a 0-second timer to + # continue the rest of the logic in a private method. The private + # method should use itself instead of connect() as the callback for + # scheduling retries. 
+ + # TODO This should use _on_terminate for consistent behavior/cleanup + self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self, + error) + self.remaining_connection_attempts = self.params.connection_attempts + self._set_connection_state(self.CONNECTION_CLOSED) + + @staticmethod + def _tune_heartbeat_timeout(client_value, server_value): + """ Determine heartbeat timeout per AMQP 0-9-1 rules + + Per https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf, + + > Both peers negotiate the limits to the lowest agreed value as follows: + > - The server MUST tell the client what limits it proposes. + > - The client responds and **MAY reduce those limits** for its + connection + + When negotiating heartbeat timeout, the reasoning needs to be reversed. + The way I think it makes sense to interpret this rule for heartbeats is + that the consumable resource is the frequency of heartbeats, which is + the inverse of the timeout. The more frequent heartbeats consume more + resources than less frequent heartbeats. So, when both heartbeat + timeouts are non-zero, we should pick the max heartbeat timeout rather + than the min. The heartbeat timeout value 0 (zero) has a special + meaning - it's supposed to disable the timeout. This makes zero a + setting for the least frequent heartbeats (i.e., never); therefore, if + any (or both) of the two is zero, then the above rules would suggest + that negotiation should yield 0 value for heartbeat, effectively turning + it off. + + :param client_value: None to accept server_value; otherwise, an integral + number in seconds; 0 (zero) to disable heartbeat. + :param server_value: integral value of the heartbeat timeout proposed by + broker; 0 (zero) to disable heartbeat. 
+ + :returns: the value of the heartbeat timeout to use and return to broker + """ + if client_value is None: + # Accept server's limit + timeout = server_value + elif client_value == 0 or server_value == 0: + # 0 has a special meaning "disable heartbeats", which makes it the + # least frequent heartbeat value there is + timeout = 0 + else: + # Pick the one with the bigger heartbeat timeout (i.e., the less + # frequent one) + timeout = max(client_value, server_value) + + return timeout + def _on_connection_tune(self, method_frame): """Once the Broker sends back a Connection.Tune, we will set our tuning variables that have been returned to us and kick off the Heartbeat @@ -1313,11 +1874,11 @@ method_frame.method.channel_max) self.params.frame_max = self._combine(self.params.frame_max, method_frame.method.frame_max) - if self.params.heartbeat is None: - self.params.heartbeat = method_frame.method.heartbeat - elif self.params.heartbeat != 0: - self.params.heartbeat = self._combine(self.params.heartbeat, - method_frame.method.heartbeat) + + # Negotiate heatbeat timeout + self.params.heartbeat = self._tune_heartbeat_timeout( + client_value=self.params.heartbeat, + server_value=method_frame.method.heartbeat) # Calculate the maximum pieces for body frames self._body_max_length = self._get_body_frame_max_length() @@ -1346,27 +1907,92 @@ self._trim_frame_buffer(consumed_count) self._process_frame(frame_value) - def _on_disconnect(self, reply_code, reply_text): - """Invoke passing in the reply_code and reply_text from internal - methods to the adapter. Called from on_connection_closed and Heartbeat - timeouts. 
- - :param str reply_code: The numeric close code - :param str reply_text: The text close reason + def _on_terminate(self, reason_code, reason_text): + """Terminate the connection and notify registered ON_CONNECTION_ERROR + and/or ON_CONNECTION_CLOSED callbacks + + :param integer reason_code: either IETF RFC 821 reply code for + AMQP-level closures or a value from `InternalCloseReasons` for + internal causes, such as socket errors + :param str reason_text: human-readable text message describing the error + """ + LOGGER.info( + 'Disconnected from RabbitMQ at %s:%i (%s): %s', + self.params.host, self.params.port, reason_code, + reason_text) + + if not isinstance(reason_code, numbers.Integral): + raise TypeError('reason_code must be an integer, but got %r' + % (reason_code,)) - """ - LOGGER.warning('Disconnected from RabbitMQ at %s:%i (%s): %s', - self.params.host, self.params.port, reply_code, - reply_text) + # Stop the heartbeat checker if it exists + self._remove_heartbeat() + + # Remove connection management callbacks + # TODO This call was moved here verbatim from legacy code and the + # following doesn't seem to be right: `Connection.Open` here is + # unexpected, we don't appear to ever register it, and the broker + # shouldn't be sending `Connection.Open` to us, anyway. 
+ self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start, + spec.Connection.Open]) + + if self.params.blocked_connection_timeout is not None: + self._remove_callbacks(0, [spec.Connection.Blocked, + spec.Connection.Unblocked]) + + # Close the socket + self._adapter_disconnect() + + # Determine whether this was an error during connection setup + connection_error = None + + if self.connection_state == self.CONNECTION_PROTOCOL: + LOGGER.error('Incompatible Protocol Versions') + connection_error = exceptions.IncompatibleProtocolError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_START: + LOGGER.error('Connection closed while authenticating indicating a ' + 'probable authentication error') + connection_error = exceptions.ProbableAuthenticationError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_TUNE: + LOGGER.error('Connection closed while tuning the connection ' + 'indicating a probable permission error when ' + 'accessing a virtual host') + connection_error = exceptions.ProbableAccessDeniedError( + reason_code, + reason_text) + elif self.connection_state not in [self.CONNECTION_OPEN, + self.CONNECTION_CLOSED, + self.CONNECTION_CLOSING]: + LOGGER.warning('Unexpected connection state on disconnect: %i', + self.connection_state) + + # Transition to closed state self._set_connection_state(self.CONNECTION_CLOSED) + + # Inform our channel proxies for channel in dictkeys(self._channels): if channel not in self._channels: continue - method_frame = frame.Method(channel, spec.Channel.Close(reply_code, - reply_text)) - self._channels[channel]._on_close(method_frame) - self._process_connection_closed_callbacks(reply_code, reply_text) - self._remove_connection_callbacks() + # pylint: disable=W0212 + self._channels[channel]._on_close_meta(reason_code, reason_text) + + # Inform interested parties + if connection_error is not None: + LOGGER.error('Connection setup failed due to %r', connection_error) + 
self.callbacks.process(0, + self.ON_CONNECTION_ERROR, + self, self, + connection_error) + + self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self, + reason_code, reason_text) + + # Reset connection properties + self._init_connection_state() def _process_callbacks(self, frame_value): """Process the callbacks for the frame if the frame is a method frame @@ -1377,7 +2003,7 @@ """ if (self._is_method_frame(frame_value) and - self._has_pending_callbacks(frame_value)): + self._has_pending_callbacks(frame_value)): self.callbacks.process(frame_value.channel_number, # Prefix frame_value.method, # Key self, # Caller @@ -1385,17 +2011,6 @@ return True return False - def _process_connection_closed_callbacks(self, reason_code, reason_text): - """Process any callbacks that should be called when the connection is - closed. - - :param str reason_code: The numeric code from RabbitMQ for the close - :param str reason_text: The text reason fro closing - - """ - self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self, - reason_code, reason_text) - def _process_frame(self, frame_value): """Process an inbound frame from the socket. @@ -1434,45 +2049,30 @@ """ return frame.decode_frame(self._frame_buffer) - def _reject_out_of_band_delivery(self, channel_number, delivery_tag): - """Reject a delivery on the specified channel number and delivery tag - because said channel no longer exists. - - :param int channel_number: The channel number - :param int delivery_tag: The delivery tag - - """ - LOGGER.warning('Rejected out-of-band delivery on channel %i (%s)', - channel_number, delivery_tag) - self._send_method(channel_number, spec.Basic.Reject(delivery_tag)) - - def _remove_callback(self, channel_number, method_frame): + def _remove_callback(self, channel_number, method_class): """Remove the specified method_frame callback if it is set for the specified channel number. 
:param int channel_number: The channel number to remove the callback on - :param pika.object.Method: The method frame for the callback + :param pika.amqp_object.Method method_class: The method class for the + callback """ - self.callbacks.remove(str(channel_number), method_frame) + self.callbacks.remove(str(channel_number), method_class) - def _remove_callbacks(self, channel_number, method_frames): + def _remove_callbacks(self, channel_number, method_classes): """Remove the callbacks for the specified channel number and list of method frames. :param int channel_number: The channel number to remove the callback on - :param list method_frames: The method frames for the callback + :param sequence method_classes: The method classes (derived from + `pika.amqp_object.Method`) for the callbacks """ - for method_frame in method_frames: + for method_frame in method_classes: self._remove_callback(channel_number, method_frame) - def _remove_connection_callbacks(self): - """Remove all callbacks for the connection""" - self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start, - spec.Connection.Open]) - - def _rpc(self, channel_number, method_frame, + def _rpc(self, channel_number, method, callback_method=None, acceptable_replies=None): """Make an RPC call for the given callback, channel number and method. @@ -1480,7 +2080,7 @@ server with the specified callback. 
:param int channel_number: The channel number for the RPC call - :param pika.object.Method method_frame: The method frame to call + :param pika.amqp_object.Method method: The method frame to call :param method callback_method: The callback for the RPC response :param list acceptable_replies: The replies this RPC call expects @@ -1498,7 +2098,7 @@ self.callbacks.add(channel_number, reply, callback_method) # Send the rpc call to RabbitMQ - self._send_method(channel_number, method_frame) + self._send_method(channel_number, method) def _send_connection_close(self, reply_code, reply_text): """Send a Connection.Close method frame. @@ -1508,7 +2108,7 @@ """ self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0), - self._on_connection_closed, [spec.Connection.CloseOk]) + self._on_connection_close_ok, [spec.Connection.CloseOk]) def _send_connection_open(self): """Send a Connection.Open frame""" @@ -1544,7 +2144,7 @@ """ if self.is_closed: - LOGGER.critical('Attempted to send frame when closed') + LOGGER.error('Attempted to send frame when closed') raise exceptions.ConnectionClosed marshaled_frame = frame_value.marshal() @@ -1555,52 +2155,51 @@ if self.params.backpressure_detection: self._detect_backpressure() - def _send_method(self, channel_number, method_frame, content=None): + def _send_method(self, channel_number, method, content=None): """Constructs a RPC method frame and then sends it to the broker. :param int channel_number: The channel number for the frame - :param pika.object.Method method_frame: The method frame to send + :param pika.amqp_object.Method method: The method to send :param tuple content: If set, is a content frame, is tuple of properties and body. 
""" - if not content: - with self._write_lock: - self._send_frame(frame.Method(channel_number, method_frame)) - return - self._send_message(channel_number, method_frame, content) + if content: + self._send_message(channel_number, method, content) + else: + self._send_frame(frame.Method(channel_number, method)) - def _send_message(self, channel_number, method_frame, content=None): + def _send_message(self, channel_number, method, content=None): """Send the message directly, bypassing the single _send_frame invocation by directly appending to the output buffer and flushing within a lock. :param int channel_number: The channel number for the frame - :param pika.object.Method method_frame: The method frame to send + :param pika.amqp_object.Method method: The method frame to send :param tuple content: If set, is a content frame, is tuple of properties and body. """ length = len(content[1]) - write_buffer = [frame.Method(channel_number, method_frame).marshal(), + write_buffer = [frame.Method(channel_number, method).marshal(), frame.Header(channel_number, length, content[0]).marshal()] if content[1]: chunks = int(math.ceil(float(length) / self._body_max_length)) - for chunk in range(0, chunks): - s = chunk * self._body_max_length - e = s + self._body_max_length - if e > length: - e = length + for chunk in xrange(0, chunks): + start = chunk * self._body_max_length + end = start + self._body_max_length + if end > length: + end = length write_buffer.append(frame.Body(channel_number, - content[1][s:e]).marshal()) + content[1][start:end]).marshal()) - with self._write_lock: - self.outbound_buffer += write_buffer - self.frames_sent += len(write_buffer) - self._flush_outbound() - if self.params.backpressure_detection: - self._detect_backpressure() + self.outbound_buffer += write_buffer + self.frames_sent += len(write_buffer) + self.bytes_sent += sum(len(frame) for frame in write_buffer) + self._flush_outbound() + if self.params.backpressure_detection: + 
self._detect_backpressure() def _set_connection_state(self, connection_state): """Set the connection state. diff -Nru python-pika-0.10.0/pika/credentials.py python-pika-0.11.0/pika/credentials.py --- python-pika-0.10.0/pika/credentials.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/credentials.py 2017-08-29 16:54:39.000000000 +0000 @@ -51,6 +51,15 @@ self.password = password self.erase_on_connect = erase_on_connect + def __eq__(self, other): + return (isinstance(other, PlainCredentials) and + other.username == self.username and + other.password == self.password and + other.erase_on_connect == self.erase_on_connect) + + def __ne__(self, other): + return not self == other + def response_for(self, start): """Validate that this type of authentication is supported @@ -84,6 +93,13 @@ """Create a new instance of ExternalCredentials""" self.erase_on_connect = False + def __eq__(self, other): + return (isinstance(other, ExternalCredentials) and + other.erase_on_connect == self.erase_on_connect) + + def __ne__(self, other): + return not self == other + def response_for(self, start): """Validate that this type of authentication is supported diff -Nru python-pika-0.10.0/pika/exceptions.py python-pika-0.11.0/pika/exceptions.py --- python-pika-0.10.0/pika/exceptions.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/exceptions.py 2017-08-29 16:54:39.000000000 +0000 @@ -26,7 +26,8 @@ class IncompatibleProtocolError(AMQPConnectionError): def __repr__(self): - return 'The protocol returned by the server is not supported' + return ('The protocol returned by the server is not supported: %s' % + (self.args,)) class AuthenticationError(AMQPConnectionError): @@ -40,14 +41,15 @@ def __repr__(self): return ('Client was disconnected at a connection stage indicating a ' - 'probable authentication error') + 'probable authentication error: %s' % (self.args,)) class ProbableAccessDeniedError(AMQPConnectionError): def __repr__(self): return ('Client was 
disconnected at a connection stage indicating a ' - 'probable denial of access to the specified virtual host') + 'probable denial of access to the specified virtual host: %s' % + (self.args,)) class NoFreeChannels(AMQPConnectionError): @@ -82,6 +84,11 @@ return 'The channel was closed: %s' % (self.args,) +class ChannelAlreadyClosing(AMQPChannelError): + """Raised when `Channel.close` is called while channel is already closing""" + pass + + class DuplicateConsumerTag(AMQPChannelError): def __repr__(self): @@ -210,12 +217,18 @@ class InvalidMinimumFrameSize(ProtocolSyntaxError): + """ DEPRECATED; pika.connection.Parameters.frame_max property setter now + raises the standard `ValueError` exception when the value is out of bounds. + """ def __repr__(self): return 'AMQP Minimum Frame Size is 4096 Bytes' class InvalidMaximumFrameSize(ProtocolSyntaxError): + """ DEPRECATED; pika.connection.Parameters.frame_max property setter now + raises the standard `ValueError` exception when the value is out of bounds. + """ def __repr__(self): return 'AMQP Maximum Frame Size is 131072 Bytes' @@ -235,3 +248,10 @@ def __repr__(self): return ('AMQP Short String can contain up to 255 bytes: ' '%.300s' % self.args[0]) + + +class DuplicateGetOkCallback(ChannelError): + + def __repr__(self): + return ('basic_get can only be called again after the callback for the' + 'previous basic_get is executed') diff -Nru python-pika-0.10.0/pika/heartbeat.py python-pika-0.11.0/pika/heartbeat.py --- python-pika-0.10.0/pika/heartbeat.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/heartbeat.py 2017-08-29 16:54:39.000000000 +0000 @@ -115,10 +115,14 @@ self._idle_byte_intervals) duration = self._max_idle_count * self._interval text = HeartbeatChecker._STALE_CONNECTION % duration + + # NOTE: this won't achieve the perceived effect of sending + # Connection.Close to broker, because the frame will only get buffered + # in memory before the next statement terminates the connection. 
self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text) - self._connection._adapter_disconnect() - self._connection._on_disconnect(HeartbeatChecker._CONNECTION_FORCED, - text) + + self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, + text) @property def _has_received_data(self): @@ -129,7 +133,8 @@ """ return not self._bytes_received == self.bytes_received_on_connection - def _new_heartbeat_frame(self): + @staticmethod + def _new_heartbeat_frame(): """Return a new heartbeat frame. :rtype pika.frame.Heartbeat diff -Nru python-pika-0.10.0/pika/__init__.py python-pika-0.11.0/pika/__init__.py --- python-pika-0.10.0/pika/__init__.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/__init__.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,4 +1,4 @@ -__version__ = '0.10.0' +__version__ = '0.11.0' import logging try: diff -Nru python-pika-0.10.0/pika/spec.py python-pika-0.11.0/pika/spec.py --- python-pika-0.10.0/pika/spec.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/pika/spec.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,19 +1,24 @@ -# ***** BEGIN LICENSE BLOCK ***** -# -# For copyright and licensing please refer to COPYING. -# -# ***** END LICENSE BLOCK ***** +""" +AMQP Specification +================== +This module implements the constants and classes that comprise AMQP protocol +level constructs. It should rarely be directly referenced outside of Pika's +own internal use. + +.. note:: Auto-generated code by codegen.py, do not edit directly. Pull +requests to this file without accompanying ``utils/codegen.py`` changes will be +rejected. 
-# NOTE: Autogenerated code by codegen.py, do not edit +""" import struct from pika import amqp_object from pika import data from pika.compat import str_or_bytes, unicode_type +# Python 3 support for str object str = bytes - PROTOCOL_VERSION = (0, 9, 1) PORT = 5672 @@ -39,11 +44,13 @@ NOT_IMPLEMENTED = 540 NO_CONSUMERS = 313 NO_ROUTE = 312 +PERSISTENT_DELIVERY_MODE = 2 PRECONDITION_FAILED = 406 REPLY_SUCCESS = 200 RESOURCE_ERROR = 506 RESOURCE_LOCKED = 405 SYNTAX_ERROR = 502 +TRANSIENT_DELIVERY_MODE = 1 UNEXPECTED_FRAME = 505 diff -Nru python-pika-0.10.0/pylintrc python-pika-0.11.0/pylintrc --- python-pika-0.10.0/pylintrc 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/pylintrc 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,391 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Profiled execution. +profile=no + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Deprecated. It was used to include message's id in output. Use --msg-template +# instead. +#include-ids=no + +# Deprecated. It was used to include symbolic ids of messages in output. Use +# --msg-template instead. +#symbols=no + +# Use multiple processes to speed up Pylint. +#jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +#unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +#extension-pkg-whitelist= + +# Allow optimization of some AST trees. 
This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. +#optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable= + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". 
+files-output=no + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +msg-template={msg_id}, {line:3d}:{column:2d} - {msg} ({symbol}) +#msg-template= + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,input + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,fd,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression which should only match function or class names that do +# 
not require a docstring. +no-docstring-rgx=__.*__ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=100 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. 
+spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis +ignored-modules= + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_|_$|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. 
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=10 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=20 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=20 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=40 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. 
internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff -Nru python-pika-0.10.0/README.rst python-pika-0.11.0/README.rst --- python-pika-0.10.0/README.rst 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/README.rst 2017-08-29 16:54:39.000000000 +0000 @@ -1,12 +1,13 @@ -Pika, an AMQP 0-9-1 client library for Python -============================================= +Pika +==== +Pika is a RabbitMQ (AMQP-0-9-1) client library for Python. -|Version| |Downloads| |Status| |Coverage| |License| +|Version| |Status| |Coverage| |License| |Docs| Introduction ------------- -Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries -to stay fairly independent of the underlying network support library. +Pika is a pure-Python implementation of the AMQP 0-9-1 protocol including RabbitMQ's +extensions. - Python 2.6+ and 3.3+ are supported. 
@@ -22,8 +23,7 @@ Documentation ------------- - -Pika's documentation can be found at `https://pika.readthedocs.org `_ +Pika's documentation can be found at `https://pika.readthedocs.io `_ Example ------- @@ -50,7 +50,7 @@ for method_frame, properties, body in channel.consume('test'): # Display the message parts and ack the message - print method_frame, properties, body + print(method_frame, properties, body) channel.basic_ack(method_frame.delivery_tag) # Escape out of the loop after 10 messages @@ -59,12 +59,13 @@ # Cancel the consumer and return any pending messages requeued_messages = channel.cancel() - print 'Requeued %i messages' % requeued_messages + print('Requeued %i messages' % requeued_messages) connection.close() Pika provides the following adapters ------------------------------------ +- AsyncioConnection - adapter for the Python3 AsyncIO event loop - BlockingConnection - enables blocking, synchronous operation on top of library for simple uses - LibevConnection - adapter for use with the libev event loop http://libev.schmorp.de - SelectConnection - fast asynchronous adapter @@ -74,9 +75,12 @@ Contributing ------------ To contribute to pika, please make sure that any new features or changes -to existing functionality include test coverage. Additionally, please format -your code using `yapf `_ with ``google`` style -prior to issuing your pull request. +to existing functionality **include test coverage**. + +*Pull requests that add or change code without coverage will most likely be rejected.* + +Additionally, please format your code using `yapf `_ +with ``google`` style prior to issuing your pull request. .. |Version| image:: https://img.shields.io/pypi/v/pika.svg? :target: http://badge.fury.io/py/pika @@ -87,8 +91,9 @@ .. |Coverage| image:: https://img.shields.io/codecov/c/github/pika/pika.svg? :target: https://codecov.io/github/pika/pika?branch=master -.. |Downloads| image:: https://img.shields.io/pypi/dm/pika.svg? 
- :target: https://pypi.python.org/pypi/pika - .. |License| image:: https://img.shields.io/pypi/l/pika.svg? - :target: https://pika.readthedocs.org + :target: https://pika.readthedocs.io + +.. |Docs| image:: https://readthedocs.org/projects/pika/badge/?version=stable + :target: https://pika.readthedocs.io + :alt: Documentation Status diff -Nru python-pika-0.10.0/setup.cfg python-pika-0.11.0/setup.cfg --- python-pika-0.10.0/setup.cfg 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/setup.cfg 2017-08-29 16:54:39.000000000 +0000 @@ -1,2 +1,10 @@ [bdist_wheel] universal = 1 + +[nosetests] +with-coverage=1 +cover-package=pika +cover-branches=1 +cover-erase=1 +tests=tests/unit,tests/acceptance +verbosity=3 diff -Nru python-pika-0.10.0/setup.py python-pika-0.11.0/setup.py --- python-pika-0.10.0/setup.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/setup.py 2017-08-29 16:54:39.000000000 +0000 @@ -16,12 +16,12 @@ 'with other AMQP 0-9-1 brokers.') setup(name='pika', - version='0.10.0', + version='0.11.0', description='Pika Python AMQP Client Library', long_description=open('README.rst').read(), maintainer='Gavin M. 
Roy', maintainer_email='gavinmroy@gmail.com', - url='https://pika.readthedocs.org ', + url='https://pika.readthedocs.io', packages=['pika', 'pika.adapters'], license='BSD', install_requires=requirements, @@ -40,6 +40,7 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: Jython', 'Programming Language :: Python :: Implementation :: PyPy', diff -Nru python-pika-0.10.0/test-requirements.txt python-pika-0.11.0/test-requirements.txt --- python-pika-0.10.0/test-requirements.txt 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/test-requirements.txt 2017-08-29 16:54:39.000000000 +0000 @@ -3,4 +3,4 @@ mock nose tornado -twisted +twisted<15.4.0 diff -Nru python-pika-0.10.0/tests/acceptance/async_adapter_tests.py python-pika-0.11.0/tests/acceptance/async_adapter_tests.py --- python-pika-0.10.0/tests/acceptance/async_adapter_tests.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/async_adapter_tests.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,13 +1,28 @@ +# Suppress pylint messages concerning missing class and method docstrings +# pylint: disable=C0111 + +# Suppress pylint warning about attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint warning about access to protected member +# pylint: disable=W0212 + +# Suppress pylint warning about unused argument +# pylint: disable=W0613 + import time import uuid from pika import spec from pika.compat import as_bytes +import pika.connection +import pika.frame +import pika.spec from async_test_base import (AsyncTestCase, BoundQueueTestCase, AsyncAdapters) -class TestA_Connect(AsyncTestCase, AsyncAdapters): +class TestA_Connect(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Connect, open channel and disconnect" def 
begin(self, channel): @@ -26,11 +41,49 @@ self.stop() +class TestBlockingNonBlockingBlockingRPCWontStall(AsyncTestCase, AsyncAdapters): + DESCRIPTION = ("Verify that a sequence of blocking, non-blocking, blocking " + "RPC requests won't stall") + + def begin(self, channel): + # Queue declaration params table: queue name, nowait value + self._expected_queue_params = ( + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, True), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False) + ) + + self._declared_queue_names = [] + + for queue, nowait in self._expected_queue_params: + channel.queue_declare(callback=self._queue_declare_ok_cb + if not nowait else None, + queue=queue, + auto_delete=True, + nowait=nowait, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def _queue_declare_ok_cb(self, declare_ok_frame): + self._declared_queue_names.append(declare_ok_frame.method.queue) + + if len(self._declared_queue_names) == 2: + # Initiate check for creation of queue declared with nowait=True + self.channel.queue_declare(callback=self._queue_declare_ok_cb, + queue=self._expected_queue_params[1][0], + passive=True, + nowait=False) + elif len(self._declared_queue_names) == 3: + self.assertSequenceEqual( + sorted(self._declared_queue_names), + sorted(item[0] for item in self._expected_queue_params)) + self.stop() + + class TestConsumeCancel(AsyncTestCase, AsyncAdapters): DESCRIPTION = "Consume and cancel" def begin(self, channel): - self.queue_name = str(uuid.uuid4()) + self.queue_name = self.__class__.__name__ + ':' + uuid.uuid1().hex channel.queue_declare(self.on_queue_declared, queue=self.queue_name) def on_queue_declared(self, frame): @@ -58,7 +111,7 @@ X_TYPE = 'direct' def begin(self, channel): - self.name = self.__class__.__name__ + ':' + str(id(self)) + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex channel.exchange_declare(self.on_exchange_declared, self.name, 
exchange_type=self.X_TYPE, passive=False, @@ -81,7 +134,7 @@ X_TYPE2 = 'topic' def begin(self, channel): - self.name = self.__class__.__name__ + ':' + str(id(self)) + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex self.channel.add_on_close_callback(self.on_channel_closed) channel.exchange_declare(self.on_exchange_declared, self.name, exchange_type=self.X_TYPE1, @@ -97,7 +150,7 @@ self.connection.channel(self.on_cleanup_channel) def on_exchange_declared(self, frame): - self.channel.exchange_declare(self.on_exchange_declared, self.name, + self.channel.exchange_declare(self.on_bad_result, self.name, exchange_type=self.X_TYPE2, passive=False, durable=False, @@ -134,7 +187,8 @@ DESCRIPTION = "Create and delete a named queue" def begin(self, channel): - channel.queue_declare(self.on_queue_declared, str(id(self)), + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex + channel.queue_declare(self.on_queue_declared, self._q_name, passive=False, durable=False, exclusive=True, @@ -143,10 +197,9 @@ arguments={'x-expires': self.TIMEOUT * 1000}) def on_queue_declared(self, frame): - queue = str(id(self)) self.assertIsInstance(frame.method, spec.Queue.DeclareOk) # Frame's method's queue is encoded (impl detail) - self.assertEqual(frame.method.queue, queue) + self.assertEqual(frame.method.queue, self._q_name) self.channel.queue_delete(self.on_queue_delete, frame.method.queue) def on_queue_delete(self, frame): @@ -159,8 +212,9 @@ DESCRIPTION = "Should close chan: re-declared queue w/ diff params" def begin(self, channel): + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex self.channel.add_on_close_callback(self.on_channel_closed) - channel.queue_declare(self.on_queue_declared, str(id(self)), + channel.queue_declare(self.on_queue_declared, self._q_name, passive=False, durable=False, exclusive=True, @@ -172,7 +226,7 @@ self.stop() def on_queue_declared(self, frame): - self.channel.queue_declare(self.on_bad_result, str(id(self)), + 
self.channel.queue_declare(self.on_bad_result, self._q_name, passive=False, durable=True, exclusive=False, @@ -181,13 +235,13 @@ arguments={'x-expires': self.TIMEOUT * 1000}) def on_bad_result(self, frame): - self.channel.queue_delete(None, str(id(self)), nowait=True) + self.channel.queue_delete(None, self._q_name, nowait=True) raise AssertionError("Should not have received a Queue.DeclareOk") -class TestTX1_Select(AsyncTestCase, AsyncAdapters): - DESCRIPTION="Receive confirmation of Tx.Select" +class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Receive confirmation of Tx.Select" def begin(self, channel): channel.tx_select(self.on_complete) @@ -198,8 +252,8 @@ -class TestTX2_Commit(AsyncTestCase, AsyncAdapters): - DESCRIPTION="Start a transaction, and commit it" +class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Start a transaction, and commit it" def begin(self, channel): channel.tx_select(self.on_selectok) @@ -213,7 +267,7 @@ self.stop() -class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): +class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Close the channel: commit without a TX" def begin(self, channel): @@ -226,11 +280,12 @@ def on_selectok(self, frame): self.assertIsInstance(frame.method, spec.Tx.SelectOk) - def on_commitok(self, frame): + @staticmethod + def on_commitok(frame): raise AssertionError("Should not have received a Tx.CommitOk") -class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): +class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Start a transaction, then rollback" def begin(self, channel): @@ -246,7 +301,7 @@ -class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): +class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Close the channel: rollback without a TX" def begin(self, channel): @@ -256,12 +311,12 @@ def 
on_channel_closed(self, channel, reply_code, reply_text): self.stop() - def on_commitok(self, frame): + @staticmethod + def on_commitok(frame): raise AssertionError("Should not have received a Tx.RollbackOk") - -class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a message and consume it" def on_ready(self, frame): @@ -282,10 +337,11 @@ -class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a big message and consume it" - def _get_msg_body(self): + @staticmethod + def _get_msg_body(): return '\n'.join(["%s" % i for i in range(0, 2097152)]) def on_ready(self, frame): @@ -305,8 +361,7 @@ self.channel.basic_cancel(self.on_cancelled, self.ctag) - -class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a message and get it" def on_ready(self, frame): @@ -320,3 +375,86 @@ self.assertEqual(body, as_bytes(self.msg_body)) self.channel.basic_ack(method.delivery_tag) self.stop() + + +class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that access denied invokes on open error callback" + + def start(self, *args, **kwargs): + self.parameters.virtual_host = str(uuid.uuid4()) + self.error_captured = False + super(TestZ_AccessDenied, self).start(*args, **kwargs) + self.assertTrue(self.error_captured) + + def on_open_error(self, connection, error): + self.error_captured = True + self.stop() + + def on_open(self, connection): + super(TestZ_AccessDenied, self).on_open(connection) + self.stop() + + +class TestBlockedConnectionTimesOut(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that blocked connection terminates on timeout" + + 
def start(self, *args, **kwargs): + self.parameters.blocked_connection_timeout = 0.001 + self.on_closed_pair = None + super(TestBlockedConnectionTimesOut, self).start(*args, **kwargs) + self.assertEqual( + self.on_closed_pair, + (pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT, + 'Blocked connection timeout expired')) + + def begin(self, channel): + + # Simulate Connection.Blocked + channel.connection._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked('Testing blocked connection timeout'))) + + def on_closed(self, connection, reply_code, reply_text): + """called when the connection has finished closing""" + self.on_closed_pair = (reply_code, reply_text) + super(TestBlockedConnectionTimesOut, self).on_closed(connection, + reply_code, + reply_text) + + +class TestBlockedConnectionUnblocks(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that blocked-unblocked connection closes normally" + + def start(self, *args, **kwargs): + self.parameters.blocked_connection_timeout = 0.001 + self.on_closed_pair = None + super(TestBlockedConnectionUnblocks, self).start(*args, **kwargs) + self.assertEqual( + self.on_closed_pair, + (200, 'Normal shutdown')) + + def begin(self, channel): + + # Simulate Connection.Blocked + channel.connection._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked( + 'Testing blocked connection unblocks'))) + + # Simulate Connection.Unblocked + channel.connection._on_connection_unblocked(pika.frame.Method( + 0, + pika.spec.Connection.Unblocked())) + + # Schedule shutdown after blocked connection timeout would expire + channel.connection.add_timeout(0.005, self.on_cleanup_timer) + + def on_cleanup_timer(self): + self.stop() + + def on_closed(self, connection, reply_code, reply_text): + """called when the connection has finished closing""" + self.on_closed_pair = (reply_code, reply_text) + super(TestBlockedConnectionUnblocks, self).on_closed(connection, + 
reply_code, + reply_text) diff -Nru python-pika-0.10.0/tests/acceptance/async_test_base.py python-pika-0.11.0/tests/acceptance/async_test_base.py --- python-pika-0.10.0/tests/acceptance/async_test_base.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/async_test_base.py 2017-08-29 16:54:39.000000000 +0000 @@ -1,27 +1,49 @@ +# Suppress pylint warnings concerning attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint messages concerning missing docstrings +# pylint: disable=C0111 + +from datetime import datetime import select +import sys import logging + try: import unittest2 as unittest except ImportError: import unittest import platform -target = platform.python_implementation() +_TARGET = platform.python_implementation() + +import uuid + +try: + from unittest import mock +except ImportError: + import mock + import pika from pika import adapters from pika.adapters import select_connection -LOGGER = logging.getLogger(__name__) -PARAMETERS = pika.URLParameters('amqp://guest:guest@localhost:5672/%2f') -DEFAULT_TIMEOUT = 15 - class AsyncTestCase(unittest.TestCase): DESCRIPTION = "" ADAPTER = None - TIMEOUT = DEFAULT_TIMEOUT + TIMEOUT = 15 + def setUp(self): + self.logger = logging.getLogger(self.__class__.__name__) + self.parameters = pika.URLParameters( + 'amqp://guest:guest@localhost:5672/%2F') + self._timed_out = False + super(AsyncTestCase, self).setUp() + + def tearDown(self): + self._stop() def shortDescription(self): method_desc = super(AsyncTestCase, self).shortDescription() @@ -30,67 +52,70 @@ else: return method_desc - def begin(self, channel): + def begin(self, channel): # pylint: disable=R0201,W0613 """Extend to start the actual tests on the channel""" - raise AssertionError("AsyncTestCase.begin_test not extended") + self.fail("AsyncTestCase.begin_test not extended") def start(self, adapter=None): + self.logger.info('start at %s', datetime.utcnow()) self.adapter = adapter or self.ADAPTER - 
self.connection = self.adapter(PARAMETERS, self.on_open, + self.connection = self.adapter(self.parameters, self.on_open, self.on_open_error, self.on_closed) self.timeout = self.connection.add_timeout(self.TIMEOUT, self.on_timeout) self.connection.ioloop.start() + self.assertFalse(self._timed_out) def stop(self): """close the connection and stop the ioloop""" - LOGGER.info("Stopping test") - self.connection.remove_timeout(self.timeout) - self.timeout = None + self.logger.info("Stopping test") + if self.timeout is not None: + self.connection.remove_timeout(self.timeout) + self.timeout = None self.connection.close() def _stop(self): - if hasattr(self, 'timeout') and self.timeout: + if hasattr(self, 'timeout') and self.timeout is not None: + self.logger.info("Removing timeout") self.connection.remove_timeout(self.timeout) self.timeout = None if hasattr(self, 'connection') and self.connection: + self.logger.info("Stopping ioloop") self.connection.ioloop.stop() self.connection = None - def tearDown(self): - self._stop() - def on_closed(self, connection, reply_code, reply_text): """called when the connection has finished closing""" - LOGGER.debug("Connection Closed") + self.logger.info('on_closed: %r %r %r', connection, + reply_code, reply_text) self._stop() def on_open(self, connection): + self.logger.debug('on_open: %r', connection) self.channel = connection.channel(self.begin) - def on_open_error(self, connection): + def on_open_error(self, connection, error): + self.logger.error('on_open_error: %r %r', connection, error) connection.ioloop.stop() raise AssertionError('Error connecting to RabbitMQ') def on_timeout(self): """called when stuck waiting for connection to close""" - # force the ioloop to stop - self.connection.ioloop.stop() - raise AssertionError('Test timed out') + self.logger.error('%s timed out; on_timeout called at %s', + self, datetime.utcnow()) + self.timeout = None # the dispatcher should have removed it + self._timed_out = True + # initiate cleanup + 
self.stop() class BoundQueueTestCase(AsyncTestCase): - def tearDown(self): - """Cleanup auto-declared queue and exchange""" - self._cconn = self.adapter(PARAMETERS, self._on_cconn_open, - self._on_cconn_error, self._on_cconn_closed) - def start(self, adapter=None): # PY3 compat encoding - self.exchange = 'e' + str(id(self)) - self.queue = 'q' + str(id(self)) + self.exchange = 'e-' + self.__class__.__name__ + ':' + uuid.uuid1().hex + self.queue = 'q-' + self.__class__.__name__ + ':' + uuid.uuid1().hex self.routing_key = self.__class__.__name__ super(BoundQueueTestCase, self).start(adapter) @@ -101,85 +126,83 @@ durable=False, auto_delete=True) - def on_exchange_declared(self, frame): + def on_exchange_declared(self, frame): # pylint: disable=W0613 self.channel.queue_declare(self.on_queue_declared, self.queue, passive=False, durable=False, exclusive=True, auto_delete=True, nowait=False, - arguments={'x-expires': self.TIMEOUT * 1000} - ) + arguments={'x-expires': self.TIMEOUT * 1000}) - def on_queue_declared(self, frame): + def on_queue_declared(self, frame): # pylint: disable=W0613 self.channel.queue_bind(self.on_ready, self.queue, self.exchange, self.routing_key) def on_ready(self, frame): raise NotImplementedError - def _on_cconn_closed(self, cconn, *args, **kwargs): - cconn.ioloop.stop() - self._cconn = None - - def _on_cconn_error(self, connection): - connection.ioloop.stop() - raise AssertionError('Error connecting to RabbitMQ') - - def _on_cconn_open(self, connection): - connection.channel(self._on_cconn_channel) - - def _on_cconn_channel(self, channel): - channel.exchange_delete(None, self.exchange, nowait=True) - channel.queue_delete(None, self.queue, nowait=True) - self._cconn.close() # # In order to write test cases that will tested using all the Async Adapters -# write a class that inherits both from one of TestCase classes above and +# write a class that inherits both from one of TestCase classes above and # from the AsyncAdapters class below. 
This allows you to avoid duplicating the # test methods for each adapter in each test class. # class AsyncAdapters(object): + def start(self, adapter_class): + raise NotImplementedError + def select_default_test(self): - "SelectConnection:DefaultPoller" - select_connection.POLLER_TYPE=None - self.start(adapters.SelectConnection) + """SelectConnection:DefaultPoller""" + + with mock.patch.multiple(select_connection, SELECT_TYPE=None): + self.start(adapters.SelectConnection) def select_select_test(self): - "SelectConnection:select" - select_connection.POLLER_TYPE='select' - self.start(adapters.SelectConnection) + """SelectConnection:select""" + + with mock.patch.multiple(select_connection, SELECT_TYPE='select'): + self.start(adapters.SelectConnection) - @unittest.skipIf(not hasattr(select, 'poll') - or not hasattr(select.poll(), 'modify'), "poll not supported") + @unittest.skipIf( + not hasattr(select, 'poll') or + not hasattr(select.poll(), 'modify'), "poll not supported") # pylint: disable=E1101 def select_poll_test(self): - "SelectConnection:poll" - select_connection.POLLER_TYPE='poll' - self.start(adapters.SelectConnection) + """SelectConnection:poll""" + + with mock.patch.multiple(select_connection, SELECT_TYPE='poll'): + self.start(adapters.SelectConnection) @unittest.skipIf(not hasattr(select, 'epoll'), "epoll not supported") def select_epoll_test(self): - "SelectConnection:epoll" - select_connection.POLLER_TYPE='epoll' - self.start(adapters.SelectConnection) + """SelectConnection:epoll""" + + with mock.patch.multiple(select_connection, SELECT_TYPE='epoll'): + self.start(adapters.SelectConnection) @unittest.skipIf(not hasattr(select, 'kqueue'), "kqueue not supported") def select_kqueue_test(self): - "SelectConnection:kqueue" - select_connection.POLLER_TYPE='kqueue' - self.start(adapters.SelectConnection) + """SelectConnection:kqueue""" + + with mock.patch.multiple(select_connection, SELECT_TYPE='kqueue'): + self.start(adapters.SelectConnection) def 
tornado_test(self): - "TornadoConnection" + """TornadoConnection""" self.start(adapters.TornadoConnection) - @unittest.skipIf(target == 'PyPy', 'PyPy is not supported') + @unittest.skipIf(sys.version_info < (3, 4), "Asyncio available for Python 3.4+") + def asyncio_test(self): + """AsyncioConnection""" + self.start(adapters.AsyncioConnection) + + @unittest.skipIf(_TARGET == 'PyPy', 'PyPy is not supported') @unittest.skipIf(adapters.LibevConnection is None, 'pyev is not installed') def libev_test(self): - "LibevConnection" + """LibevConnection""" self.start(adapters.LibevConnection) diff -Nru python-pika-0.10.0/tests/acceptance/blocking_adapter_test.py python-pika-0.11.0/tests/acceptance/blocking_adapter_test.py --- python-pika-0.10.0/tests/acceptance/blocking_adapter_test.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/blocking_adapter_test.py 2017-08-29 16:54:39.000000000 +0000 @@ -11,6 +11,7 @@ import uuid from forward_server import ForwardServer +from test_utils import retry_assertion import pika from pika.adapters import blocking_connection @@ -33,7 +34,9 @@ # Disable warning Invalid variable name # pylint: disable=C0103 + LOGGER = logging.getLogger(__name__) + PARAMS_URL_TEMPLATE = ( 'amqp://guest:guest@127.0.0.1:%(port)s/%%2f?socket_timeout=1') DEFAULT_URL = PARAMS_URL_TEMPLATE % {'port': 5672} @@ -41,6 +44,11 @@ DEFAULT_TIMEOUT = 15 + +def setUpModule(): + logging.basicConfig(level=logging.DEBUG) + + class BlockingTestCaseBase(unittest.TestCase): TIMEOUT = DEFAULT_TIMEOUT @@ -65,6 +73,14 @@ LOGGER.info('%s TIMED OUT (%s)', datetime.utcnow(), self) self.fail('Test timed out') + @retry_assertion(TIMEOUT/2) + def _assert_exact_message_count_with_retries(self, + channel, + queue, + expected_count): + frame = channel.queue_declare(queue, passive=True) + self.assertEqual(frame.method.message_count, expected_count) + class TestCreateAndCloseConnection(BlockingTestCaseBase): @@ -82,6 +98,25 @@ self.assertFalse(connection.is_closing) 
+class TestMultiCloseConnection(BlockingTestCaseBase): + + def test(self): + """BlockingConnection: Close connection twice""" + connection = self._connect() + self.assertIsInstance(connection, pika.BlockingConnection) + self.assertTrue(connection.is_open) + self.assertFalse(connection.is_closed) + self.assertFalse(connection.is_closing) + + connection.close() + self.assertTrue(connection.is_closed) + self.assertFalse(connection.is_open) + self.assertFalse(connection.is_closing) + + # Second close call shouldn't crash + connection.close() + + class TestConnectionContextManagerClosesConnection(BlockingTestCaseBase): def test(self): """BlockingConnection: connection context manager closes connection""" @@ -94,7 +129,7 @@ class TestConnectionContextManagerClosesConnectionAndPassesOriginalException(BlockingTestCaseBase): def test(self): - """BlockingConnection: connection context manager closes connection and passes original exception""" + """BlockingConnection: connection context manager closes connection and passes original exception""" # pylint: disable=C0301 class MyException(Exception): pass @@ -109,7 +144,7 @@ class TestConnectionContextManagerClosesConnectionAndPassesSystemException(BlockingTestCaseBase): def test(self): - """BlockingConnection: connection context manager closes connection and passes system exception""" + """BlockingConnection: connection context manager closes connection and passes system exception""" # pylint: disable=C0301 with self.assertRaises(SystemExit): with self._connect() as connection: self.assertTrue(connection.is_open) @@ -178,7 +213,10 @@ def test(self): """BlockingConnection resets properly on TCP/IP drop during channel() """ - with ForwardServer((DEFAULT_PARAMS.host, DEFAULT_PARAMS.port)) as fwd: + with ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) as fwd: + self.connection = self._connect( PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}) @@ -198,7 +236,10 @@ def 
test(self): """BlockingConnection no access file descriptor after ConnectionClosed """ - with ForwardServer((DEFAULT_PARAMS.host, DEFAULT_PARAMS.port)) as fwd: + with ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) as fwd: + self.connection = self._connect( PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}) @@ -244,7 +285,10 @@ def test(self): """ BlockingConnection TCP/IP connection loss in CONNECTION_START """ - fwd = ForwardServer((DEFAULT_PARAMS.host, DEFAULT_PARAMS.port)) + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) + fwd.start() self.addCleanup(lambda: fwd.stop() if fwd.running else None) @@ -267,7 +311,9 @@ def test(self): """ BlockingConnection TCP/IP connection loss in CONNECTION_TUNE """ - fwd = ForwardServer((DEFAULT_PARAMS.host, DEFAULT_PARAMS.port)) + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) fwd.start() self.addCleanup(lambda: fwd.stop() if fwd.running else None) @@ -290,7 +336,10 @@ def test(self): """ BlockingConnection TCP/IP connection loss in CONNECTION_PROTOCOL """ - fwd = ForwardServer((DEFAULT_PARAMS.host, DEFAULT_PARAMS.port)) + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) + fwd.start() self.addCleanup(lambda: fwd.stop() if fwd.running else None) @@ -327,7 +376,7 @@ self.assertLess(elapsed, 0.25) -class TestConnectionBlockAndUnblock(BlockingTestCaseBase): +class TestConnectionRegisterForBlockAndUnblock(BlockingTestCaseBase): def test(self): """BlockingConnection register for Connection.Blocked/Unblocked""" @@ -358,6 +407,35 @@ self.assertEqual(unblocked_buffer, ["unblocked"]) +class TestBlockedConnectionTimeout(BlockingTestCaseBase): + + def test(self): + """BlockingConnection Connection.Blocked timeout """ + url = DEFAULT_URL + '&blocked_connection_timeout=0.001' + conn = self._connect(url=url) + + 
# NOTE: I haven't figured out yet how to coerce RabbitMQ to emit + # Connection.Block and Connection.Unblock from the test, so we'll + # simulate it for now + + # Simulate Connection.Blocked + conn._impl._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked('TestBlockedConnectionTimeout'))) + + # Wait for connection teardown + with self.assertRaises(pika.exceptions.ConnectionClosed) as excCtx: + while True: + conn.process_data_events(time_limit=1) + + self.assertEqual( + excCtx.exception.args, + (pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT, + 'Blocked connection timeout expired')) + + + + class TestAddTimeoutRemoveTimeout(BlockingTestCaseBase): def test(self): @@ -415,7 +493,7 @@ while not rx_timer2: connection.process_data_events(time_limit=None) - self.assertNotIn(timer_id1, connection._impl.ioloop._timeouts) + self.assertNotIn(timer_id1, connection._impl.ioloop._poller._timeouts) self.assertFalse(connection._ready_events) @@ -557,8 +635,9 @@ mandatory=True) # Check that the queue now has one message - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 1) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) # Unbind the exchanges frame = ch.exchange_unbind(destination=dest_exg_name, @@ -740,10 +819,9 @@ LOGGER.info('%s ACKED (%s)', datetime.utcnow(), self) # Verify that the queue is now empty - frame = ch.queue_declare(q_name, passive=True) - LOGGER.info('%s DECLARE PASSIVE QUEUE DONE (%s)', - datetime.utcnow(), self) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestBasicReject(BlockingTestCaseBase): @@ -785,8 +863,9 @@ # Verify that exactly one message is present in the queue, namely the # second one - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 1) + 
self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) self.assertEqual(rx_body, as_bytes('TestBasicReject2')) @@ -831,8 +910,9 @@ ch.basic_reject(rx_method.delivery_tag, requeue=False) # Verify that no messages are present in the queue - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestBasicNack(BlockingTestCaseBase): @@ -874,8 +954,9 @@ # Verify that exactly one message is present in the queue, namely the # second one - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 1) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) self.assertEqual(rx_body, as_bytes('TestBasicNack2')) @@ -920,8 +1001,9 @@ ch.basic_nack(rx_method.delivery_tag, requeue=False) # Verify that no messages are present in the queue - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestBasicNackMultiple(BlockingTestCaseBase): @@ -964,8 +1046,9 @@ ch.basic_nack(rx_method.delivery_tag, multiple=True, requeue=True) # Verify that both messages are present in the queue - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 2) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) self.assertEqual(rx_body, as_bytes('TestBasicNackMultiple1')) @@ -1365,8 +1448,7 @@ self.addCleanup(self._connect().channel().queue_delete, q_name) # Bind the queue to the exchange using routing key - frame = ch.queue_bind(q_name, 
exchange=exg_name, - routing_key=routing_key) + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) # Attempt to send an unroutable message in the queue via basic_publish res = ch.basic_publish(exg_name, routing_key='', @@ -1384,11 +1466,9 @@ self.assertEqual(res, True) # Wait for the queue to get the routable message - while ch.queue_declare(q_name, passive=True).method.message_count < 1: - pass - - self.assertEqual( - ch.queue_declare(q_name, passive=True).method.message_count, 1) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) msg = ch.basic_get(q_name) @@ -1411,8 +1491,9 @@ ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) # Verify that the queue is now empty - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestPublishAndConsumeWithPubacksAndQosOfOne(BlockingTestCaseBase): @@ -1443,8 +1524,7 @@ self.addCleanup(self._connect().channel().queue_delete, q_name) # Bind the queue to the exchange using routing key - frame = ch.queue_bind(q_name, exchange=exg_name, - routing_key=routing_key) + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) # Deposit a message in the queue via basic_publish msg1_headers = dict( @@ -1531,8 +1611,9 @@ ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) # Verify that the queue is now empty - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) # Attempt to cosume again with a short timeout connection.process_data_events(time_limit=0.005) @@ -1549,6 +1630,111 @@ self.assertEqual(frame.method.consumer_tag, consumer_tag) +class TestTwoBasicConsumersOnSameChannel(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914 + 
"""BlockingChannel: two basic_consume consumers on same channel + """ + connection = self._connect() + + ch = connection.channel() + + exg_name = 'TestPublishAndConsumeAndQos_exg_' + uuid.uuid1().hex + q1_name = 'TestTwoBasicConsumersOnSameChannel_q1' + uuid.uuid1().hex + q2_name = 'TestTwoBasicConsumersOnSameChannel_q2' + uuid.uuid1().hex + q1_routing_key = 'TestTwoBasicConsumersOnSameChannel1' + q2_routing_key = 'TestTwoBasicConsumersOnSameChannel2' + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous + ch.confirm_delivery() + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare the two new queues and bind them to the exchange + ch.queue_declare(q1_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q1_name) + ch.queue_bind(q1_name, exchange=exg_name, routing_key=q1_routing_key) + + ch.queue_declare(q2_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q2_name) + ch.queue_bind(q2_name, exchange=exg_name, routing_key=q2_routing_key) + + # Deposit messages in the queues + q1_tx_message_bodies = ['q1_message+%s' % (i,) + for i in pika.compat.xrange(100)] + for message_body in q1_tx_message_bodies: + ch.publish(exg_name, q1_routing_key, body=message_body, + mandatory=True) + + q2_tx_message_bodies = ['q2_message+%s' % (i,) + for i in pika.compat.xrange(150)] + for message_body in q2_tx_message_bodies: + ch.publish(exg_name, q2_routing_key, body=message_body, + mandatory=True) + + # Create the consumers + q1_rx_messages = [] + q1_consumer_tag = ch.basic_consume( + lambda *args: q1_rx_messages.append(args), + q1_name, + no_ack=False, + exclusive=False, + arguments=None) + + q2_rx_messages = [] + q2_consumer_tag = ch.basic_consume( + lambda *args: q2_rx_messages.append(args), + q2_name, + no_ack=False, + exclusive=False, + arguments=None) 
+ + # Wait for all messages to be delivered + while (len(q1_rx_messages) < len(q1_tx_message_bodies) or + len(q2_rx_messages) < len(q2_tx_message_bodies)): + connection.process_data_events(time_limit=None) + + self.assertEqual(len(q2_rx_messages), len(q2_tx_message_bodies)) + + # Verify the messages + def validate_messages(rx_messages, + routing_key, + consumer_tag, + tx_message_bodies): + self.assertEqual(len(rx_messages), len(tx_message_bodies)) + + for msg, expected_body in zip(rx_messages, tx_message_bodies): + self.assertIsInstance(msg, tuple) + rx_ch, rx_method, rx_properties, rx_body = msg + self.assertIs(rx_ch, ch) + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.consumer_tag, consumer_tag) + self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, exg_name) + self.assertEqual(rx_method.routing_key, routing_key) + + self.assertIsInstance(rx_properties, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes(expected_body)) + + # Validate q1 consumed messages + validate_messages(rx_messages=q1_rx_messages, + routing_key=q1_routing_key, + consumer_tag=q1_consumer_tag, + tx_message_bodies=q1_tx_message_bodies) + + # Validate q2 consumed messages + validate_messages(rx_messages=q2_rx_messages, + routing_key=q2_routing_key, + consumer_tag=q2_consumer_tag, + tx_message_bodies=q2_tx_message_bodies) + + # There shouldn't be any more events now + self.assertFalse(ch._pending_events) + + class TestBasicCancelPurgesPendingConsumerCancellationEvt(BlockingTestCaseBase): def test(self): @@ -1622,8 +1808,7 @@ self.addCleanup(self._connect().channel().queue_delete, q_name) # Bind the queue to the exchange using routing key - frame = ch.queue_bind(q_name, exchange=exg_name, - routing_key=routing_key) + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) # Deposit a message in the queue via basic_publish and mandatory=True msg1_headers = dict( @@ -1638,12 +1823,13 @@ # Deposit a message in the 
queue via basic_publish and mandatory=False res = ch.basic_publish(exg_name, routing_key=routing_key, body='via-basic_publish_mandatory=False', - mandatory=True) + mandatory=False) self.assertEqual(res, True) # Wait for the messages to arrive in queue - while ch.queue_declare(q_name, passive=True).method.message_count != 2: - pass + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) # Create a consumer rx_messages = [] @@ -1709,8 +1895,9 @@ ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) # Verify that the queue is now empty - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) # Attempt to cosume again with a short timeout connection.process_data_events(time_limit=0.005) @@ -1761,8 +1948,7 @@ arguments=None) # Consume from destination queue - for _, _, rx_body in ch.consume(dest_q_name, - no_ack=True): + for _, _, rx_body in ch.consume(dest_q_name, no_ack=True): self.assertEqual(rx_body, as_bytes('via-publish')) break else: @@ -1977,8 +2163,9 @@ # Verify that the queue is now empty ch.close() ch = connection.channel() - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestNonPubackPublishAndConsumeManyMessages(BlockingTestCaseBase): @@ -2034,8 +2221,9 @@ # Verify that the queue is now empty ch = connection.channel() - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 0) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) class TestBasicCancelWithNonAckableConsumer(BlockingTestCaseBase): @@ -2061,16 +2249,18 @@ ch.publish(exchange='', routing_key=q_name, body=body2) # Wait for queue to contain both messages - while 
ch.queue_declare(q_name, passive=True).method.message_count != 2: - pass + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) # Create a non-ackable consumer consumer_tag = ch.basic_consume(lambda *x: None, q_name, no_ack=True, exclusive=False, arguments=None) # Wait for all messages to be sent by broker to client - while ch.queue_declare(q_name, passive=True).method.message_count > 0: - pass + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) # Cancel the consumer messages = ch.basic_cancel(consumer_tag) @@ -2088,7 +2278,7 @@ ch = connection.channel() - # Verify that the queue is now empty; this validates the multi-ack + # Verify that the queue is now empty frame = ch.queue_declare(q_name, passive=True) self.assertEqual(frame.method.message_count, 0) @@ -2116,16 +2306,18 @@ ch.publish(exchange='', routing_key=q_name, body=body2) # Wait for queue to contain both messages - while ch.queue_declare(q_name, passive=True).method.message_count != 2: - pass + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) # Create an ackable consumer consumer_tag = ch.basic_consume(lambda *x: None, q_name, no_ack=False, exclusive=False, arguments=None) # Wait for all messages to be sent by broker to client - while ch.queue_declare(q_name, passive=True).method.message_count > 0: - pass + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) # Cancel the consumer messages = ch.basic_cancel(consumer_tag) @@ -2137,9 +2329,10 @@ ch = connection.channel() - # Verify that the queue is now empty; this validates the multi-ack - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 2) + # Verify that canceling the ackable consumer restored both messages + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) class 
TestUnackedMessageAutoRestoredToQueueOnChannelClose(BlockingTestCaseBase): @@ -2187,8 +2380,9 @@ # Verify that there are two messages in q now ch = connection.channel() - frame = ch.queue_declare(q_name, passive=True) - self.assertEqual(frame.method.message_count, 2) + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) class TestNoAckMessageNotRestoredToQueueOnChannelClose(BlockingTestCaseBase): @@ -2215,9 +2409,7 @@ # Consume, but don't ack num_messages = 0 - for rx_method, _, _ in ch.consume(q_name, - no_ack=True, - exclusive=False): + for rx_method, _, _ in ch.consume(q_name, no_ack=True, exclusive=False): num_messages += 1 self.assertEqual(rx_method.delivery_tag, num_messages) diff -Nru python-pika-0.10.0/tests/acceptance/enforce_one_basicget_test.py python-pika-0.11.0/tests/acceptance/enforce_one_basicget_test.py --- python-pika-0.10.0/tests/acceptance/enforce_one_basicget_test.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/enforce_one_basicget_test.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,32 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest + +from mock import MagicMock +from pika.frame import Method, Header +from pika.exceptions import DuplicateGetOkCallback +from pika.channel import Channel +from pika.connection import Connection + + +class OnlyOneBasicGetTestCase(unittest.TestCase): + def setUp(self): + self.channel = Channel(MagicMock(Connection)(), 0, None) + self.channel._state = Channel.OPEN + self.callback = MagicMock() + + def test_two_basic_get_with_callback(self): + self.channel.basic_get(self.callback) + self.channel._on_getok(MagicMock(Method)(), MagicMock(Header)(), '') + self.channel.basic_get(self.callback) + self.channel._on_getok(MagicMock(Method)(), MagicMock(Header)(), '') + self.assertEqual(self.callback.call_count, 2) + + def test_two_basic_get_without_callback(self): + self.channel.basic_get(self.callback) + with 
self.assertRaises(DuplicateGetOkCallback): + self.channel.basic_get(self.callback) + +if __name__ == '__main__': + unittest.main() diff -Nru python-pika-0.10.0/tests/acceptance/forward_server.py python-pika-0.11.0/tests/acceptance/forward_server.py --- python-pika-0.10.0/tests/acceptance/forward_server.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/forward_server.py 2017-08-29 16:54:39.000000000 +0000 @@ -5,10 +5,12 @@ import array from datetime import datetime import errno +from functools import partial import logging import multiprocessing import os import socket +import struct import sys import threading import traceback @@ -32,7 +34,7 @@ -class ForwardServer(object): +class ForwardServer(object): # pylint: disable=R0902 """ Implement a TCP/IP forwarding/echo service for testing. Listens for an incoming TCP/IP connection, accepts it, then connects to the given remote address and forwards data back and forth between the two @@ -47,7 +49,7 @@ with ForwardServer(("localhost", 5672)) as fwd: params = pika.ConnectionParameters( - host="localhost", + host=fwd.server_address[0], port=fwd.server_address[1]) conn = pika.BlockingConnection(params) @@ -57,14 +59,20 @@ channel = conn.channel() Echo server example - def talk_to_echo_server(port): - pass + def produce(sock): + sock.sendall("12345") + sock.shutdown(socket.SHUT_WR) with ForwardServer(None) as fwd: - worker = threading.Thread(target=talk_to_echo_server, - args=[fwd.server_address[1]]) + sock = socket.socket() + sock.connect(fwd.server_address) + + worker = threading.Thread(target=produce, + args=[sock]) worker.start() - time.sleep(5) + + data = sock.makefile().read() + assert data == "12345", data worker.join() @@ -73,13 +81,14 @@ _SUBPROC_TIMEOUT = 10 - def __init__(self, + def __init__(self, # pylint: disable=R0913 remote_addr, remote_addr_family=socket.AF_INET, remote_socket_type=socket.SOCK_STREAM, server_addr=("127.0.0.1", 0), server_addr_family=socket.AF_INET, - 
server_socket_type=socket.SOCK_STREAM): + server_socket_type=socket.SOCK_STREAM, + local_linger_args=None): """ :param tuple remote_addr: remote server's IP address, whose structure depends on remote_addr_family; pair (host-or-ip-addr, port-number). @@ -96,6 +105,11 @@ socket.AF_UNIX; defaults to socket.AF_INET :param server_socket_type: only socket.SOCK_STREAM is supported at this time + :param tuple local_linger_args: SO_LINGER sockoverride for the local + connection sockets, to be configured after connection is accepted. + None for default, which is to not change the SO_LINGER option. + Otherwise, its a two-tuple, where the first element is the `l_onoff` + switch, and the second element is the `l_linger` value, in seconds """ self._logger = logging.getLogger(__name__) @@ -114,6 +128,8 @@ assert server_socket_type == socket.SOCK_STREAM, server_socket_type self._server_socket_type = server_socket_type + self._local_linger_args = local_linger_args + self._subproc = None @@ -168,7 +184,7 @@ :returns: self """ - q = multiprocessing.Queue() + queue = multiprocessing.Queue() self._subproc = multiprocessing.Process( target=_run_server, @@ -176,18 +192,20 @@ local_addr=self._server_addr, local_addr_family=self._server_addr_family, local_socket_type=self._server_socket_type, + local_linger_args=self._local_linger_args, remote_addr=self._remote_addr, remote_addr_family=self._remote_addr_family, remote_socket_type=self._remote_socket_type, - q=q)) + queue=queue)) self._subproc.daemon = True self._subproc.start() try: # Get server socket info from subprocess - self._server_addr_family, self._server_addr = q.get( + self._server_addr_family, self._server_addr = queue.get( block=True, timeout=self._SUBPROC_TIMEOUT) + queue.close() except Exception: # pylint: disable=W0703 try: self._logger.exception( @@ -213,7 +231,6 @@ ForwardServer. start()/stop() are alternatives to the context manager use case and are mutually exclusive with it. 
""" - _trace("ForwardServer STOPPING") self._logger.info("ForwardServer STOPPING") try: @@ -236,26 +253,35 @@ -def _run_server(local_addr, local_addr_family, local_socket_type, - remote_addr, remote_addr_family, remote_socket_type, q): +def _run_server(local_addr, local_addr_family, local_socket_type, # pylint: disable=R0913 + local_linger_args, remote_addr, remote_addr_family, + remote_socket_type, queue): """ Run the server; executed in the subprocess :param local_addr: listening address :param local_addr_family: listening address family; one of socket.AF_* :param local_socket_type: listening socket type; typically socket.SOCK_STREAM - :param remote_addr: address of the target server + :param tuple local_linger_args: SO_LINGER sockoverride for the local + connection sockets, to be configured after connection is accepted. + Pass None to not change SO_LINGER. Otherwise, its a two-tuple, where the + first element is the `l_onoff` switch, and the second element is the + `l_linger` value in seconds + :param remote_addr: address of the target server. Pass None to have + ForwardServer behave as echo server :param remote_addr_family: address family for connecting to target server; one of socket.AF_* :param remote_socket_type: socket type for connecting to target server; typically socket.SOCK_STREAM - :param multiprocessing.Queue q: queue for depositing the forwarding server's - actual listening socket address family and bound address. The parent - process waits for this. + :param multiprocessing.Queue queue: queue for depositing the forwarding + server's actual listening socket address family and bound address. The + parent process waits for this. 
""" # NOTE: We define _ThreadedTCPServer class as a closure in order to # override some of its class members dynamically + # NOTE: we add `object` to the base classes because `_ThreadedTCPServer` + # isn't derived from `object`, which prevents `super` from working properly class _ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer, object): @@ -267,38 +293,35 @@ allow_reuse_address = True - def __init__(self, - remote_addr, - remote_addr_family, - remote_socket_type): - self.remote_addr = remote_addr - self.remote_addr_family = remote_addr_family - self.remote_socket_type = remote_socket_type + def __init__(self): + + handler_class_factory = partial( + _TCPHandler, + local_linger_args=local_linger_args, + remote_addr=remote_addr, + remote_addr_family=remote_addr_family, + remote_socket_type=remote_socket_type) super(_ThreadedTCPServer, self).__init__( local_addr, - _TCPHandler, + handler_class_factory, bind_and_activate=True) - server = _ThreadedTCPServer(remote_addr, - remote_addr_family, - remote_socket_type) + server = _ThreadedTCPServer() # Send server socket info back to parent process - q.put([server.socket.family, server.server_address]) - q.close() + queue.put([server.socket.family, server.server_address]) -## # Validate server's socket fileno -## _trace("Checking server fd=%s after q.put", server.socket.fileno()) -## fcntl.fcntl(server.socket.fileno(), fcntl.F_GETFL) -## _trace("Server fd is OK after q.put") + queue.close() server.serve_forever() -class _TCPHandler(SocketServer.StreamRequestHandler): +# NOTE: we add `object` to the base classes because `StreamRequestHandler` isn't +# derived from `object`, which prevents `super` from working properly +class _TCPHandler(SocketServer.StreamRequestHandler, object): """TCP/IP session handler instantiated by TCPServer upon incoming connection. Implements forwarding/echo of the incoming connection. 
""" @@ -306,56 +329,93 @@ _SOCK_RX_BUF_SIZE = 16 * 1024 - def handle(self): - try: - local_sock = self.connection + def __init__(self, # pylint: disable=R0913 + request, + client_address, + server, + local_linger_args, + remote_addr, + remote_addr_family, + remote_socket_type): + """ + :param request: for super + :param client_address: for super + "paarm server: for super + :param tuple local_linger_args: SO_LINGER sockoverride for the local + connection sockets, to be configured after connection is accepted. + Pass None to not change SO_LINGER. Otherwise, its a two-tuple, where + the first element is the `l_onoff` switch, and the second element is + the `l_linger` value in seconds + :param remote_addr: address of the target server. Pass None to have + ForwardServer behave as echo server. + :param remote_addr_family: address family for connecting to target + server; one of socket.AF_* + :param remote_socket_type: socket type for connecting to target server; + typically socket.SOCK_STREAM + :param **kwargs: kwargs for super class + """ + self._local_linger_args = local_linger_args + self._remote_addr = remote_addr + self._remote_addr_family = remote_addr_family + self._remote_socket_type = remote_socket_type - if self.server.remote_addr is not None: - # Forwarding set-up - remote_dest_sock = remote_src_sock = socket.socket( - family=self.server.remote_addr_family, - type=self.server.remote_socket_type, - proto=socket.IPPROTO_IP) - remote_dest_sock.connect(self.server.remote_addr) - _trace("%s _TCPHandler connected to remote %s", - datetime.utcnow(), remote_dest_sock.getpeername()) - else: - # Echo set-up - remote_dest_sock, remote_src_sock = socket_pair() + super(_TCPHandler, self).__init__(request=request, + client_address=client_address, + server=server) + + + def handle(self): # pylint: disable=R0912 + """Connect to remote and forward data between local and remote""" + local_sock = self.connection + + if self._local_linger_args is not None: + # Set SO_LINGER 
socket options on local socket + l_onoff, l_linger = self._local_linger_args + local_sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, + struct.pack('ii', l_onoff, l_linger)) + + if self._remote_addr is not None: + # Forwarding set-up + remote_dest_sock = remote_src_sock = socket.socket( + family=self._remote_addr_family, + type=self._remote_socket_type, + proto=socket.IPPROTO_IP) + remote_dest_sock.connect(self._remote_addr) + _trace("%s _TCPHandler connected to remote %s", + datetime.utcnow(), remote_dest_sock.getpeername()) + else: + # Echo set-up + remote_dest_sock, remote_src_sock = socket_pair() - try: - local_forwarder = threading.Thread( - target=self._forward, - args=(local_sock, remote_dest_sock,)) - local_forwarder.setDaemon(True) - local_forwarder.start() + try: + local_forwarder = threading.Thread( + target=self._forward, + args=(local_sock, remote_dest_sock,)) + local_forwarder.setDaemon(True) + local_forwarder.start() - try: - self._forward(remote_src_sock, local_sock) - finally: - # Wait for local forwarder thread to exit - local_forwarder.join() + try: + self._forward(remote_src_sock, local_sock) finally: + # Wait for local forwarder thread to exit + local_forwarder.join() + finally: + try: try: - try: - _safe_shutdown_socket(remote_dest_sock, - socket.SHUT_RDWR) - finally: - if remote_src_sock is not remote_dest_sock: - _safe_shutdown_socket(remote_src_sock, - socket.SHUT_RDWR) + _safe_shutdown_socket(remote_dest_sock, + socket.SHUT_RDWR) finally: - remote_dest_sock.close() if remote_src_sock is not remote_dest_sock: - remote_src_sock.close() - except: - _trace("handle failed:\n%s", "".join(traceback.format_exc())) - raise + _safe_shutdown_socket(remote_src_sock, + socket.SHUT_RDWR) + finally: + remote_dest_sock.close() + if remote_src_sock is not remote_dest_sock: + remote_src_sock.close() - def _forward(self, src_sock, dest_sock): + def _forward(self, src_sock, dest_sock): # pylint: disable=R0912 """Forward from src_sock to dest_sock""" - 
src_peername = src_sock.getpeername() _trace("%s forwarding from %s to %s", datetime.utcnow(), @@ -371,17 +431,17 @@ while True: try: nbytes = src_sock.recv_into(rx_buf) - except socket.error as e: - if e.errno == errno.EINTR: + except socket.error as exc: + if exc.errno == errno.EINTR: continue - elif e.errno == errno.ECONNRESET: + elif exc.errno == errno.ECONNRESET: # Source peer forcibly closed connection _trace("%s errno.ECONNRESET from %s", datetime.utcnow(), src_peername) break else: _trace("%s Unexpected errno=%s from %s\n%s", - datetime.utcnow(), e.errno, src_peername, + datetime.utcnow(), exc.errno, src_peername, "".join(traceback.format_stack())) raise @@ -392,14 +452,14 @@ try: dest_sock.sendall(buffer(rx_buf, 0, nbytes)) - except socket.error as e: - if e.errno == errno.EPIPE: + except socket.error as exc: + if exc.errno == errno.EPIPE: # Destination peer closed its end of the connection _trace("%s Destination peer %s closed its end of " "the connection: errno.EPIPE", datetime.utcnow(), dest_sock.getpeername()) break - elif e.errno == errno.ECONNRESET: + elif exc.errno == errno.ECONNRESET: # Destination peer forcibly closed connection _trace("%s Destination peer %s forcibly closed " "connection: errno.ECONNRESET", @@ -408,7 +468,7 @@ else: _trace( "%s Unexpected errno=%s in sendall to %s\n%s", - datetime.utcnow(), e.errno, + datetime.utcnow(), exc.errno, dest_sock.getpeername(), "".join(traceback.format_stack())) raise @@ -450,8 +510,8 @@ while True: try: data = sock.recv(4 * 1024) # pylint: disable=E1101 - except socket.error as e: - if e.errno == errno.EINTR: + except socket.error as exc: + if exc.errno == errno.EINTR: continue else: raise @@ -473,8 +533,8 @@ """ try: sock.shutdown(how) - except socket.error as e: - if e.errno != errno.ENOTCONN: + except socket.error as exc: + if exc.errno != errno.ENOTCONN: raise diff -Nru python-pika-0.10.0/tests/acceptance/test_utils.py python-pika-0.11.0/tests/acceptance/test_utils.py --- 
python-pika-0.10.0/tests/acceptance/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ python-pika-0.11.0/tests/acceptance/test_utils.py 2017-08-29 16:54:39.000000000 +0000 @@ -0,0 +1,73 @@ +"""Acceptance test utils""" + +import functools +import logging +import time +import traceback + + +def retry_assertion(timeout_sec, retry_interval_sec=0.1): + """Creates a decorator that retries the decorated function or + method only upon `AssertionError` exception at the given retry interval + not to exceed the overall given timeout. + + :param float timeout_sec: overall timeout in seconds + :param float retry_interval_sec: amount of time to sleep + between retries in seconds. + + :returns: decorator that implements the following behavior + + 1. This decorator guarantees to call the decorated function or method at + least once. + 2. It passes through all exceptions besides `AssertionError`, preserving the + original exception and its traceback. + 3. If no exception, it returns the return value from the decorated function/method. + 4. It sleeps `time.sleep(retry_interval_sec)` between retries. + 5. It checks for expiry of the overall timeout before sleeping. + 6. If the overall timeout is exceeded, it re-raises the latest `AssertionError`, + preserving its original traceback + """ + + def retry_assertion_decorator(func): + """Decorator""" + + @functools.wraps(func) + def retry_assertion_wrap(*args, **kwargs): + """The wrapper""" + + num_attempts = 0 + start_time = time.time() + + while True: + num_attempts += 1 + + try: + result = func(*args, **kwargs) + except AssertionError: + + now = time.time() + # Compensate for time adjustment + if now < start_time: + start_time = now + + if (now - start_time) > timeout_sec: + logging.exception( + 'Exceeded retry timeout of %s sec in %s attempts ' + 'with func %r. 
Caller\'s stack:\n%s', + timeout_sec, num_attempts, func, + ''.join(traceback.format_stack())) + raise + + logging.debug('Attempt %s failed; retrying %r in %s sec.', + num_attempts, func, retry_interval_sec) + + time.sleep(retry_interval_sec) + else: + logging.debug('%r succeeded at attempt %s', + func, num_attempts) + return result + + return retry_assertion_wrap + + return retry_assertion_decorator + diff -Nru python-pika-0.10.0/tests/unit/base_connection_tests.py python-pika-0.11.0/tests/unit/base_connection_tests.py --- python-pika-0.10.0/tests/unit/base_connection_tests.py 2015-09-02 17:29:24.000000000 +0000 +++ python-pika-0.11.0/tests/unit/base_connection_tests.py 2017-08-29 16:54:39.000000000 +0000 @@ -12,14 +12,62 @@ except ImportError: import unittest +import pika from pika.adapters import base_connection class BaseConnectionTests(unittest.TestCase): + def setUp(self): + with mock.patch('pika.connection.Connection.connect'): + self.connection = base_connection.BaseConnection() + self.connection._set_connection_state( + base_connection.BaseConnection.CONNECTION_OPEN) + + def test_repr(self): + text = repr(self.connection) + self.assertTrue(text.startswith('