diff -Nru rabbitmq-server-1.7.2/BUILD rabbitmq-server-2.3.1/BUILD
--- rabbitmq-server-1.7.2/BUILD	2010-02-15 16:01:13.000000000 +0000
+++ rabbitmq-server-2.3.1/BUILD	1970-01-01 00:00:00.000000000 +0000
@@ -1,89 +0,0 @@
- Messaging that just works
-
- RabbitMQ
-
- * Home
- * Download
- * Documentation
- * Get Started
- * Services
- * Partners
- * Community
- * Cloud
- * FAQ
- * Search
-
- --------------------------------------------------------------------------
-
- This section describes the process for obtaining a copy of the RabbitMQ
- server source code, as well as instructions for building the server from
- source.
-
- Table of Contents
-
- * Obtaining the source
- * Required Libraries and Tools
- * Building the server
-
-Obtaining the source
-
- * Either download a released source code distribution from the download
-   page, or
- * Check the code out directly from our mercurial repositories:
-
-   $ hg clone http://hg.rabbitmq.com/rabbitmq-codegen
-   $ hg clone http://hg.rabbitmq.com/rabbitmq-server
-   $ cd rabbitmq-server
-   $ make
-
- If you choose to check the code out using mercurial, be aware that the
- code-generation module is a dependency of the server library. If you're
- working with a released source code distribution, though, the
- code-generation module is included.
-
-Required Libraries and Tools
-
- In order to build RabbitMQ, you will need a few tools.
-
- The server requires a recent version of Python and simplejson.py (an
- implementation of a JSON reader and writer in Python), for generating AMQP
- framing code. simplejson.py is included as a standard json library in the
- Python core since 2.6 release.
-
- Additionally, for building the server, you will need
-
- * the Erlang development and runtime tools
-   If you are on a Debian-based system then you need the following
-   packages installed as a minimum: erlang-dev, erlang-base or
-   erlang-base-hipe, erlang-ssl, erlang-os-mon, erlang-mnesia (on some
-   older distributions the last three are bundled into erlang-nox). If
-   you are building and installing Erlang from source then you must
-   ensure that openssl is installed on your system.
- * a recent version of GNU make
-
-Building the server
-
- Change to the rabbitmq-server directory, and type make.
-
- Other interesting Makefile targets include
-
- all
-     The default target. Builds the server.
-
- run
-     Builds the server and starts an instance with an interactive
-     Erlang shell. This will by default create a Mnesia database in
-     /tmp/rabbit-mnesia, but this location can be overridden by setting
-     the Makefile variable MNESIA_DIR:
-
-     make run MNESIA_DIR=/some/other/location/for/rabbit-mnesia
-
- clean
-     Removes build products and wipes the Mnesia database directory
-     used by the run target. See the above description of MNESIA_DIR.
-
- srcdist
-     Runs the "clean" target, and constructs a source-code distribution
-     tarball in ./dist.
-
- About us RabbitMQ™ is a Trademark of Rabbit Technologies Ltd.
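The simplejson requirement mentioned in BUILD above only matters on Python versions older than 2.6, where the standard library has no json module. A minimal sketch of the kind of fallback import this requirement implies for the code generator follows; the exact error message and the final example line are illustrative assumptions, not text taken from the codegen script:

    # Prefer the standard-library json module (Python >= 2.6); fall back
    # to the third-party simplejson package on older interpreters.
    try:
        import json
    except ImportError:
        try:
            import simplejson as json
        except ImportError:
            raise ImportError("a json or simplejson module is required")

    # Both modules expose the same load/loads interface used to read the
    # AMQP spec files below, for example:
    spec = json.loads('{"major-version": 8, "minor-version": 0}')

With either module bound to the name json, loading one of the spec files in codegen/ yields the dictionary of domains, constants and classes that amqp_codegen.py walks when emitting the framing code.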
diff -Nru rabbitmq-server-1.7.2/codegen/amqp-0.8.json rabbitmq-server-2.3.1/codegen/amqp-0.8.json --- rabbitmq-server-1.7.2/codegen/amqp-0.8.json 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/amqp-0.8.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,652 +0,0 @@ -{ - "name": "AMQP", - "major-version": 8, - "minor-version": 0, - "port": 5672, - "copyright": [ - "Copyright (C) 2008-2009 LShift Ltd, Cohesive Financial Technologies LLC,\n", - "and Rabbit Technologies Ltd\n", - "\n", - "Permission is hereby granted, free of charge, to any person\n", - "obtaining a copy of this file (the \"Software\"), to deal in the\n", - "Software without restriction, including without limitation the \n", - "rights to use, copy, modify, merge, publish, distribute, \n", - "sublicense, and/or sell copies of the Software, and to permit \n", - "persons to whom the Software is furnished to do so, subject to \n", - "the following conditions:\n", - "\n", - "The above copyright notice and this permission notice shall be\n", - "included in all copies or substantial portions of the Software.\n", - "\n", - "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n", - "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n", - "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n", - "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n", - "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n", - "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n", - "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n", - "OTHER DEALINGS IN THE SOFTWARE.\n", - "\n", - "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n", - "\n", - "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n", - "http://www.twiststandards.org/index.php?option=com_docman&task=cat_view&gid=28&&Itemid=90\n", - "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n", - "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"], - - "domains": [ - ["access-ticket", "short"], - ["bit", "bit"], - ["channel-id", "longstr"], - ["class-id", "short"], - ["consumer-tag", "shortstr"], - ["delivery-tag", "longlong"], - ["destination", "shortstr"], - ["duration", "longlong"], - ["exchange-name", "shortstr"], - ["known-hosts", "shortstr"], - ["long", "long"], - ["longlong", "longlong"], - ["longstr", "longstr"], - ["method-id", "short"], - ["no-ack", "bit"], - ["no-local", "bit"], - ["octet", "octet"], - ["offset", "longlong"], - ["path", "shortstr"], - ["peer-properties", "table"], - ["queue-name", "shortstr"], - ["redelivered", "bit"], - ["reference", "longstr"], - ["reject-code", "short"], - ["reject-text", "shortstr"], - ["reply-code", "short"], - ["reply-text", "shortstr"], - ["security-token", "longstr"], - ["short", "short"], - ["shortstr", "shortstr"], - ["table", "table"], - ["timestamp", "timestamp"] - ], - - "constants": [ - {"name": "FRAME-METHOD", "value": 1}, - {"name": "FRAME-HEADER", "value": 2}, - {"name": "FRAME-BODY", "value": 3}, - {"name": "FRAME-OOB-METHOD", "value": 4}, - {"name": "FRAME-OOB-HEADER", "value": 5}, - {"name": "FRAME-OOB-BODY", "value": 6}, - {"name": "FRAME-TRACE", "value": 7}, - {"name": "FRAME-HEARTBEAT", "value": 8}, - {"name": "FRAME-MIN-SIZE", "value": 4096}, - {"name": "FRAME-END", "value": 206}, - {"name": "REPLY-SUCCESS", "value": 200}, - {"name": "NOT-DELIVERED", "value": 310, "class": "soft-error"}, - {"name": "CONTENT-TOO-LARGE", "value": 311, "class": 
"soft-error"}, - {"name": "NO-ROUTE", "value": 312, "class": "soft-error"}, - {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"}, - {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"}, - {"name": "NOT-FOUND", "value": 404, "class": "soft-error"}, - {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"}, - {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"}, - {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"}, - {"name": "INVALID-PATH", "value": 402, "class": "hard-error"}, - {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"}, - {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"}, - {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"}, - {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"}, - {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"}, - {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"}, - {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"}, - {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"} - ], - - "classes": [ - { - "id": 10, - "methods": [{"id": 10, - "arguments": [{"type": "octet", "name": "version-major", "default-value": 0}, - {"type": "octet", "name": "version-minor", "default-value": 8}, - {"domain": "peer-properties", "name": "server properties"}, - {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"}, - {"type": "longstr", "name": "locales", "default-value": "en_US"}], - "name": "start", - "synchronous" : "true"}, - {"id": 11, - "arguments": [{"domain": "peer-properties", "name": "client-properties"}, - {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"}, - {"type": "longstr", "name": "response"}, - {"type": "shortstr", "name": "locale", "default-value": "en_US"}], - "name": "start-ok"}, - {"id": 20, - "arguments": [{"type": "longstr", "name": "challenge"}], - "name": "secure", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "longstr", "name": "response"}], - "name": "secure-ok"}, - {"id": 30, - "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, - {"type": "long", "name": "frame-max", "default-value": 0}, - {"type": "short", "name": "heartbeat", "default-value": 0}], - "name": "tune", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, - {"type": "long", "name": "frame-max", "default-value": 0}, - {"type": "short", "name": "heartbeat", "default-value": 0}], - "name": "tune-ok"}, - {"id": 40, - "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"}, - {"type": "shortstr", "name": "capabilities", "default-value": ""}, - {"type": "bit", "name": "insist", "default-value": false}], - "name": "open", - "synchronous" : "true"}, - {"id": 41, - "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}], - "name": "open-ok"}, - {"id": 50, - "arguments": [{"type": "shortstr", "name": "host"}, - {"type": "shortstr", "name": "known-hosts", "default-value": ""}], - "name": "redirect"}, - {"id": 60, - "arguments": [{"type": "short", "name": "reply-code"}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "short", "name": "class-id"}, - {"type": "short", "name": "method-id"}], - "name": "close", - "synchronous" : "true"}, - {"id": 61, - "arguments": [], - "name": "close-ok"}], - "name": "connection", - "properties": [] - }, - { - "id": 20, - "methods": [{"id": 10, - "arguments": [{"type": "shortstr", "name": 
"out-of-band", "default-value": ""}], - "name": "open", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "open-ok"}, - {"id": 20, - "arguments": [{"type": "bit", "name": "active"}], - "name": "flow", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "bit", "name": "active"}], - "name": "flow-ok"}, - {"id": 30, - "arguments": [{"type": "short", "name": "reply-code"}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "table", "name": "details", "default-value": {}}], - "name": "alert"}, - {"id": 40, - "arguments": [{"type": "short", "name": "reply-code"}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "short", "name": "class-id"}, - {"type": "short", "name": "method-id"}], - "name": "close", - "synchronous" : "true"}, - {"id": 41, - "arguments": [], - "name": "close-ok"}], - "name": "channel" - }, - { - "id": 30, - "methods": [{"id": 10, - "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"}, - {"type": "bit", "name": "exclusive", "default-value": false}, - {"type": "bit", "name": "passive", "default-value": true}, - {"type": "bit", "name": "active", "default-value": true}, - {"type": "bit", "name": "write", "default-value": true}, - {"type": "bit", "name": "read", "default-value": true}], - "name": "request", - "synchronous" : "true"}, - {"id": 11, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}], - "name": "request-ok"}], - "name": "access" - }, - { - "id": 40, - "methods": [{"id": 10, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "type", "default-value": "direct"}, - {"type": "bit", "name": "passive", "default-value": false}, - {"type": "bit", "name": "durable", "default-value": false}, - {"type": "bit", "name": "auto-delete", "default-value": false}, - {"type": "bit", "name": "internal", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}, - {"type": "table", "name": "arguments", "default-value": {}}], - "name": "declare", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "declare-ok"}, - {"id": 20, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "exchange"}, - {"type": "bit", "name": "if-unused", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "delete", - "synchronous" : "true"}, - {"id": 21, - "arguments": [], - "name": "delete-ok"}], - "name": "exchange" - }, - { - "id": 50, - "methods": [{"id": 10, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue", "default-value": ""}, - {"type": "bit", "name": "passive", "default-value": false}, - {"type": "bit", "name": "durable", "default-value": false}, - {"type": "bit", "name": "exclusive", "default-value": false}, - {"type": "bit", "name": "auto-delete", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}, - {"type": "table", "name": "arguments", "default-value": {}}], - "name": "declare", - "synchronous" : "true"}, - {"id": 11, - "arguments": [{"type": "shortstr", "name": "queue"}, - {"type": "long", "name": "message-count"}, - {"type": "long", "name": "consumer-count"}], - "name": "declare-ok"}, - {"id": 20, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "shortstr", "name": 
"exchange"}, - {"type": "shortstr", "name": "routing-key", "default-value": ""}, - {"type": "bit", "name": "nowait", "default-value": false}, - {"type": "table", "name": "arguments", "default-value": {}}], - "name": "bind", - "synchronous" : "true"}, - {"id": 21, - "arguments": [], - "name": "bind-ok"}, - {"id": 30, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "purge", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "long", "name": "message-count"}], - "name": "purge-ok"}, - {"id": 40, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "bit", "name": "if-unused", "default-value": false}, - {"type": "bit", "name": "if-empty", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "delete", - "synchronous" : "true"}, - {"id": 41, - "arguments": [{"type": "long", "name": "message-count"}], - "name": "delete-ok"}, - {"id": 50, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key", "default-value": ""}, - {"type": "table", "name": "arguments", "default-value": {}}], - "name": "unbind", - "synchronous" : "true"}, - {"id": 51, - "arguments": [], - "name": "unbind-ok"} - ], - "name": "queue" - }, - { - "id": 60, - "methods": [{"id": 10, - "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, - {"type": "short", "name": "prefetch-count", "default-value": 0}, - {"type": "bit", "name": "global", "default-value": false}], - "name": "qos", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "qos-ok"}, - {"id": 20, - "arguments": [{"domain": "access-ticket", "name": "ticket", "default-value": 1}, - {"domain": "queue-name", "name": "queue"}, - {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, - {"type": "bit", "name": "no-local", "default-value": false}, - {"type": "bit", "name": "no-ack", "default-value": false}, - {"type": "bit", "name": "exclusive", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "consume", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "consume-ok"}, - {"id": 30, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "cancel", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "cancel-ok"}, - {"content": true, - "id": 40, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "exchange", "default-value": ""}, - {"type": "shortstr", "name": "routing-key", "default-value": ""}, - {"type": "bit", "name": "mandatory", "default-value": false}, - {"type": "bit", "name": "immediate", "default-value": false}], - "name": "publish"}, - {"content": true, - "id": 50, - "arguments": [{"type": "short", "name": "reply-code"}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}], - "name": "return"}, - {"content": true, - "id": 60, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "longlong", "name": 
"delivery-tag"}, - {"type": "bit", "name": "redelivered", "default-value": false}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}], - "name": "deliver"}, - {"id": 70, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "bit", "name": "no-ack", "default-value": false}], - "name": "get", - "synchronous" : "true"}, - {"content": true, - "id": 71, - "arguments": [{"type": "longlong", "name": "delivery-tag"}, - {"type": "bit", "name": "redelivered", "default-value": false}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}, - {"type": "long", "name": "message-count"}], - "name": "get-ok"}, - {"id": 72, - "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}], - "name": "get-empty"}, - {"id": 80, - "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, - {"type": "bit", "name": "multiple", "default-value": false}], - "name": "ack"}, - {"id": 90, - "arguments": [{"type": "longlong", "name": "delivery-tag"}, - {"type": "bit", "name": "requeue", "default-value": true}], - "name": "reject"}, - {"id": 100, - "arguments": [{"type": "bit", "name": "requeue", "default-value": false}], - "name": "recover"}], - "name": "basic", - "properties": [{"type": "shortstr", "name": "content-type"}, - {"type": "shortstr", "name": "content-encoding"}, - {"type": "table", "name": "headers"}, - {"type": "octet", "name": "delivery-mode"}, - {"type": "octet", "name": "priority"}, - {"type": "shortstr", "name": "correlation-id"}, - {"type": "shortstr", "name": "reply-to"}, - {"type": "shortstr", "name": "expiration"}, - {"type": "shortstr", "name": "message-id"}, - {"type": "timestamp", "name": "timestamp"}, - {"type": "shortstr", "name": "type"}, - {"type": "shortstr", "name": "user-id"}, - {"type": "shortstr", "name": "app-id"}, - {"type": "shortstr", "name": "cluster-id"}] - }, - { - "id": 70, - "methods": [{"id": 10, - "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, - {"type": "short", "name": "prefetch-count", "default-value": 0}, - {"type": "bit", "name": "global", "default-value": false}], - "name": "qos", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "qos-ok"}, - {"id": 20, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, - {"type": "bit", "name": "no-local", "default-value": false}, - {"type": "bit", "name": "no-ack", "default-value": false}, - {"type": "bit", "name": "exclusive", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "consume", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "consume-ok"}, - {"id": 30, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "cancel", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "cancel-ok"}, - {"id": 40, - "arguments": [{"type": "shortstr", "name": "identifier"}, - {"type": "longlong", "name": "content-size"}], - "name": "open", - "synchronous" : "true"}, - {"id": 41, - "arguments": [{"type": "longlong", "name": "staged-size"}], - "name": "open-ok"}, - {"content": true, - "id": 50, - "arguments": [], - "name": "stage"}, - {"id": 
60, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "exchange", "default-value": ""}, - {"type": "shortstr", "name": "routing-key", "default-value": ""}, - {"type": "bit", "name": "mandatory", "default-value": false}, - {"type": "bit", "name": "immediate", "default-value": false}, - {"type": "shortstr", "name": "identifier"}], - "name": "publish"}, - {"content": true, - "id": 70, - "arguments": [{"type": "short", "name": "reply-code", "default-value": 200}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}], - "name": "return"}, - {"id": 80, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "longlong", "name": "delivery-tag"}, - {"type": "bit", "name": "redelivered", "default-value": false}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}, - {"type": "shortstr", "name": "identifier"}], - "name": "deliver"}, - {"id": 90, - "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, - {"type": "bit", "name": "multiple", "default-value": false}], - "name": "ack"}, - {"id": 100, - "arguments": [{"type": "longlong", "name": "delivery-tag"}, - {"type": "bit", "name": "requeue", "default-value": true}], - "name": "reject"}], - "name": "file", - "properties": [{"type": "shortstr", "name": "content-type"}, - {"type": "shortstr", "name": "content-encoding"}, - {"type": "table", "name": "headers"}, - {"type": "octet", "name": "priority"}, - {"type": "shortstr", "name": "reply-to"}, - {"type": "shortstr", "name": "message-id"}, - {"type": "shortstr", "name": "filename"}, - {"type": "timestamp", "name": "timestamp"}, - {"type": "shortstr", "name": "cluster-id"}] - }, - { - "id": 80, - "methods": [{"id": 10, - "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, - {"type": "short", "name": "prefetch-count", "default-value": 0}, - {"type": "long", "name": "consume-rate", "default-value": 0}, - {"type": "bit", "name": "global", "default-value": false}], - "name": "qos", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "qos-ok"}, - {"id": 20, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "queue"}, - {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, - {"type": "bit", "name": "no-local", "default-value": false}, - {"type": "bit", "name": "exclusive", "default-value": false}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "consume", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "consume-ok"}, - {"id": 30, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "bit", "name": "nowait", "default-value": false}], - "name": "cancel", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}], - "name": "cancel-ok"}, - {"content": true, - "id": 40, - "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, - {"type": "shortstr", "name": "exchange", "default-value": ""}, - {"type": "shortstr", "name": "routing-key", "default-value": ""}, - {"type": "bit", "name": "mandatory", "default-value": false}, - {"type": "bit", "name": "immediate", "default-value": false}], - "name": "publish"}, - {"content": true, - "id": 50, - "arguments": [{"type": "short", "name": "reply-code", "default-value": 
200}, - {"type": "shortstr", "name": "reply-text", "default-value": ""}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "routing-key"}], - "name": "return"}, - {"content": true, - "id": 60, - "arguments": [{"type": "shortstr", "name": "consumer-tag"}, - {"type": "longlong", "name": "delivery-tag"}, - {"type": "shortstr", "name": "exchange"}, - {"type": "shortstr", "name": "queue"}], - "name": "deliver"}], - "name": "stream", - "properties": [{"type": "shortstr", "name": "content-type"}, - {"type": "shortstr", "name": "content-encoding"}, - {"type": "table", "name": "headers"}, - {"type": "octet", "name": "priority"}, - {"type": "timestamp", "name": "timestamp"}] - }, - { - "id": 90, - "methods": [{"id": 10, - "arguments": [], - "name": "select", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "select-ok"}, - {"id": 20, - "arguments": [], - "name": "commit", - "synchronous" : "true"}, - {"id": 21, - "arguments": [], - "name": "commit-ok"}, - {"id": 30, - "arguments": [], - "name": "rollback", - "synchronous" : "true"}, - {"id": 31, - "arguments": [], - "name": "rollback-ok"}], - "name": "tx" - }, - { - "id": 100, - "methods": [{"id": 10, - "arguments": [], - "name": "select", - "synchronous" : "true"}, - {"id": 11, - "arguments": [], - "name": "select-ok"}, - {"id": 20, - "arguments": [{"type": "shortstr", "name": "dtx-identifier"}], - "name": "start", - "synchronous" : "true"}, - {"id": 21, - "arguments": [], "name": "start-ok"}], - "name": "dtx" - }, - { - "id": 110, - "methods": [{"content": true, - "id": 10, - "arguments": [{"type": "table", "name": "meta-data"}], - "name": "request"}], - "name": "tunnel", - "properties": [{"type": "table", "name": "headers"}, - {"type": "shortstr", "name": "proxy-name"}, - {"type": "shortstr", "name": "data-name"}, - {"type": "octet", "name": "durable"}, - {"type": "octet", "name": "broadcast"}] - }, - { - "id": 120, - "methods": [{"id": 10, - "arguments": [{"type": "octet", "name": "integer-1"}, - {"type": "short", "name": "integer-2"}, - {"type": "long", "name": "integer-3"}, - {"type": "longlong", "name": "integer-4"}, - {"type": "octet", "name": "operation"}], - "name": "integer", - "synchronous" : "true"}, - {"id": 11, - "arguments": [{"type": "longlong", "name": "result"}], - "name": "integer-ok"}, - {"id": 20, - "arguments": [{"type": "shortstr", "name": "string-1"}, - {"type": "longstr", "name": "string-2"}, - {"type": "octet", "name": "operation"}], - "name": "string", - "synchronous" : "true"}, - {"id": 21, - "arguments": [{"type": "longstr", "name": "result"}], - "name": "string-ok"}, - {"id": 30, - "arguments": [{"type": "table", "name": "table"}, - {"type": "octet", "name": "integer-op"}, - {"type": "octet", "name": "string-op"}], - "name": "table", - "synchronous" : "true"}, - {"id": 31, - "arguments": [{"type": "longlong", "name": "integer-result"}, - {"type": "longstr", "name": "string-result"}], - "name": "table-ok"}, - {"content": true, - "id": 40, - "arguments": [], - "name": "content", - "synchronous" : "true"}, - {"content": true, - "id": 41, - "arguments": [{"type": "long", "name": "content-checksum"}], - "name": "content-ok"}], - "name": "test" - } - ] -} diff -Nru rabbitmq-server-1.7.2/codegen/amqp_codegen.py rabbitmq-server-2.3.1/codegen/amqp_codegen.py --- rabbitmq-server-1.7.2/codegen/amqp_codegen.py 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/amqp_codegen.py 2011-02-03 12:47:36.000000000 +0000 @@ -1,38 +1,24 @@ -## The contents of this file are 
subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## The Original Code is RabbitMQ. +## The Original Code is RabbitMQ. ## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2009 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2009 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2009 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## from __future__ import nested_scopes import re import sys -from os import remove +import os +from optparse import OptionParser try: try: @@ -58,13 +44,102 @@ for t in ['octet', 'shortstr', 'longstr', 'short', 'long', 'longlong', 'bit', 'table', 'timestamp']: d[t] = t + +class AmqpSpecFileMergeConflict(Exception): pass + +# If ignore_conflicts is true, then we allow acc and new to conflict, +# with whatever's already in acc winning and new being ignored. If +# ignore_conflicts is false, acc and new must not conflict. 
+ +def default_spec_value_merger(key, acc, new, ignore_conflicts): + if acc is None or acc == new or ignore_conflicts: + return new + else: + raise AmqpSpecFileMergeConflict(key, acc, new) + +def extension_info_merger(key, acc, new, ignore_conflicts): + return acc + [new] + +def domains_merger(key, acc, new, ignore_conflicts): + merged = dict((k, v) for [k, v] in acc) + for [k, v] in new: + if merged.has_key(k): + if not ignore_conflicts: + raise AmqpSpecFileMergeConflict(key, acc, new) + else: + merged[k] = v + + return [[k, v] for (k, v) in merged.iteritems()] + +def merge_dict_lists_by(dict_key, acc, new, ignore_conflicts): + acc_index = set(v[dict_key] for v in acc) + result = list(acc) # shallow copy + for v in new: + if v[dict_key] in acc_index: + if not ignore_conflicts: + raise AmqpSpecFileMergeConflict(description, acc, new) + else: + result.append(v) + return result + +def constants_merger(key, acc, new, ignore_conflicts): + return merge_dict_lists_by("name", acc, new, ignore_conflicts) + +def methods_merger(classname, acc, new, ignore_conflicts): + return merge_dict_lists_by("name", acc, new, ignore_conflicts) + +def properties_merger(classname, acc, new, ignore_conflicts): + return merge_dict_lists_by("name", acc, new, ignore_conflicts) + +def class_merger(acc, new, ignore_conflicts): + acc["methods"] = methods_merger(acc["name"], + acc["methods"], + new["methods"], + ignore_conflicts) + acc["properties"] = properties_merger(acc["name"], + acc.get("properties", []), + new.get("properties", []), + ignore_conflicts) + +def classes_merger(key, acc, new, ignore_conflicts): + acc_dict = dict((v["name"], v) for v in acc) + result = list(acc) # shallow copy + for w in new: + if w["name"] in acc_dict: + class_merger(acc_dict[w["name"]], w, ignore_conflicts) + else: + result.append(w) + return result + +mergers = { + "extension": (extension_info_merger, []), + "domains": (domains_merger, []), + "constants": (constants_merger, []), + "classes": (classes_merger, []), +} + +def merge_load_specs(filenames, ignore_conflicts): + handles = [file(filename) for filename in filenames] + docs = [json.load(handle) for handle in handles] + spec = {} + for doc in docs: + for (key, value) in doc.iteritems(): + (merger, default_value) = mergers.get(key, (default_spec_value_merger, None)) + spec[key] = merger(key, spec.get(key, default_value), value, ignore_conflicts) + for handle in handles: handle.close() + return spec class AmqpSpec: - def __init__(self, filename): - self.spec = json.load(file(filename)) + # Slight wart: use a class member rather than change the ctor signature + # to avoid breaking everyone else's code. 
+ ignore_conflicts = False + + def __init__(self, filenames): + self.spec = merge_load_specs(filenames, AmqpSpec.ignore_conflicts) self.major = self.spec['major-version'] self.minor = self.spec['minor-version'] + self.revision = self.spec.has_key('revision') and self.spec['revision'] or 0 self.port = self.spec['port'] self.domains = {} @@ -82,7 +157,7 @@ self.classes = [] for element in self.spec['classes']: - self.classes.append(AmqpClass(self.spec, element)) + self.classes.append(AmqpClass(self, element)) def allClasses(self): return self.classes @@ -169,34 +244,43 @@ def __repr__(self): return 'AmqpField("' + self.name + '")' -def do_main(header_fn,body_fn): +def do_main(header_fn, body_fn): + do_main_dict({"header": header_fn, "body": body_fn}) + +def do_main_dict(funcDict): def usage(): print >> sys.stderr , "Usage:" - print >> sys.stderr , " %s header|body path_to_amqp_spec.json path_to_output_file" % (sys.argv[0]) - print >> sys.stderr , "" - - def execute(fn, amqp_spec, out_file): + print >> sys.stderr , " %s ... " % (sys.argv[0]) + print >> sys.stderr , " where is one of %s" % ", ".join([k for k in funcDict.keys()]) + + def execute(fn, amqp_specs, out_file): stdout = sys.stdout f = open(out_file, 'w') + success = False try: - try: - sys.stdout = f - fn(amqp_spec) - except: - remove(out_file) - raise + sys.stdout = f + fn(amqp_specs) + success = True finally: sys.stdout = stdout f.close() + if not success: + os.remove(out_file) + + parser = OptionParser() + parser.add_option("--ignore-conflicts", action="store_true", dest="ignore_conflicts", default=False) + (options, args) = parser.parse_args() - if not len(sys.argv) == 4: + if len(args) < 3: usage() sys.exit(1) else: - if sys.argv[1] == "header": - execute(header_fn, sys.argv[2], sys.argv[3]) - elif sys.argv[1] == "body": - execute(body_fn, sys.argv[2], sys.argv[3]) + function = args[0] + sources = args[1:-1] + dest = args[-1] + AmqpSpec.ignore_conflicts = options.ignore_conflicts + if funcDict.has_key(function): + execute(funcDict[function], sources, dest) else: usage() sys.exit(1) diff -Nru rabbitmq-server-1.7.2/codegen/amqp-rabbitmq-0.8.json rabbitmq-server-2.3.1/codegen/amqp-rabbitmq-0.8.json --- rabbitmq-server-1.7.2/codegen/amqp-rabbitmq-0.8.json 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/amqp-rabbitmq-0.8.json 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,659 @@ +{ + "name": "AMQP", + "major-version": 8, + "minor-version": 0, + "port": 5672, + "copyright": [ + "Copyright (C) 2008-2011 VMware, Inc.\n", + "\n", + "Permission is hereby granted, free of charge, to any person\n", + "obtaining a copy of this file (the \"Software\"), to deal in the\n", + "Software without restriction, including without limitation the \n", + "rights to use, copy, modify, merge, publish, distribute, \n", + "sublicense, and/or sell copies of the Software, and to permit \n", + "persons to whom the Software is furnished to do so, subject to \n", + "the following conditions:\n", + "\n", + "The above copyright notice and this permission notice shall be\n", + "included in all copies or substantial portions of the Software.\n", + "\n", + "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n", + "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n", + "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n", + "NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n", + "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n", + "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n", + "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n", + "OTHER DEALINGS IN THE SOFTWARE.\n", + "\n", + "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n", + "\n", + "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n", + "http://www.twiststandards.org/index.php?option=com_docman&task=cat_view&gid=28&&Itemid=90\n", + "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n", + "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"], + + "domains": [ + ["access-ticket", "short"], + ["bit", "bit"], + ["channel-id", "longstr"], + ["class-id", "short"], + ["consumer-tag", "shortstr"], + ["delivery-tag", "longlong"], + ["destination", "shortstr"], + ["duration", "longlong"], + ["exchange-name", "shortstr"], + ["known-hosts", "shortstr"], + ["long", "long"], + ["longlong", "longlong"], + ["longstr", "longstr"], + ["method-id", "short"], + ["no-ack", "bit"], + ["no-local", "bit"], + ["octet", "octet"], + ["offset", "longlong"], + ["path", "shortstr"], + ["peer-properties", "table"], + ["queue-name", "shortstr"], + ["redelivered", "bit"], + ["reference", "longstr"], + ["reject-code", "short"], + ["reject-text", "shortstr"], + ["reply-code", "short"], + ["reply-text", "shortstr"], + ["security-token", "longstr"], + ["short", "short"], + ["shortstr", "shortstr"], + ["table", "table"], + ["timestamp", "timestamp"] + ], + + "constants": [ + {"name": "FRAME-METHOD", "value": 1}, + {"name": "FRAME-HEADER", "value": 2}, + {"name": "FRAME-BODY", "value": 3}, + {"name": "FRAME-OOB-METHOD", "value": 4}, + {"name": "FRAME-OOB-HEADER", "value": 5}, + {"name": "FRAME-OOB-BODY", "value": 6}, + {"name": "FRAME-TRACE", "value": 7}, + {"name": "FRAME-HEARTBEAT", "value": 8}, + {"name": "FRAME-MIN-SIZE", "value": 4096}, + {"name": "FRAME-END", "value": 206}, + {"name": "REPLY-SUCCESS", "value": 200}, + {"name": "NOT-DELIVERED", "value": 310, "class": "soft-error"}, + {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"}, + {"name": "NO-ROUTE", "value": 312, "class": "soft-error"}, + {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"}, + {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"}, + {"name": "NOT-FOUND", "value": 404, "class": "soft-error"}, + {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"}, + {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"}, + {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"}, + {"name": "INVALID-PATH", "value": 402, "class": "hard-error"}, + {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"}, + {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"}, + {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"}, + {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"}, + {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"}, + {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"}, + {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"}, + {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"}, + {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"} + ], + + "classes": [ + { + "id": 10, + "methods": [{"id": 10, + "arguments": [{"type": "octet", "name": "version-major", "default-value": 0}, + {"type": "octet", "name": "version-minor", "default-value": 
8}, + {"domain": "peer-properties", "name": "server properties"}, + {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"}, + {"type": "longstr", "name": "locales", "default-value": "en_US"}], + "name": "start", + "synchronous" : true}, + {"id": 11, + "arguments": [{"domain": "peer-properties", "name": "client-properties"}, + {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"}, + {"type": "longstr", "name": "response"}, + {"type": "shortstr", "name": "locale", "default-value": "en_US"}], + "name": "start-ok"}, + {"id": 20, + "arguments": [{"type": "longstr", "name": "challenge"}], + "name": "secure", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "longstr", "name": "response"}], + "name": "secure-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, + {"type": "long", "name": "frame-max", "default-value": 0}, + {"type": "short", "name": "heartbeat", "default-value": 0}], + "name": "tune", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, + {"type": "long", "name": "frame-max", "default-value": 0}, + {"type": "short", "name": "heartbeat", "default-value": 0}], + "name": "tune-ok"}, + {"id": 40, + "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"}, + {"type": "shortstr", "name": "capabilities", "default-value": ""}, + {"type": "bit", "name": "insist", "default-value": false}], + "name": "open", + "synchronous" : true}, + {"id": 41, + "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}], + "name": "open-ok"}, + {"id": 50, + "arguments": [{"type": "shortstr", "name": "host"}, + {"type": "shortstr", "name": "known-hosts", "default-value": ""}], + "name": "redirect"}, + {"id": 60, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "short", "name": "class-id"}, + {"type": "short", "name": "method-id"}], + "name": "close", + "synchronous" : true}, + {"id": 61, + "arguments": [], + "name": "close-ok"}], + "name": "connection", + "properties": [] + }, + { + "id": 20, + "methods": [{"id": 10, + "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}], + "name": "open", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "open-ok"}, + {"id": 20, + "arguments": [{"type": "bit", "name": "active"}], + "name": "flow", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "bit", "name": "active"}], + "name": "flow-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "table", "name": "details", "default-value": {}}], + "name": "alert"}, + {"id": 40, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "short", "name": "class-id"}, + {"type": "short", "name": "method-id"}], + "name": "close", + "synchronous" : true}, + {"id": 41, + "arguments": [], + "name": "close-ok"}], + "name": "channel" + }, + { + "id": 30, + "methods": [{"id": 10, + "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "passive", "default-value": true}, + {"type": "bit", "name": "active", "default-value": true}, + {"type": "bit", "name": "write", "default-value": true}, + {"type": "bit", "name": "read", 
"default-value": true}], + "name": "request", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}], + "name": "request-ok"}], + "name": "access" + }, + { + "id": 40, + "methods": [{"id": 10, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "type", "default-value": "direct"}, + {"type": "bit", "name": "passive", "default-value": false}, + {"type": "bit", "name": "durable", "default-value": false}, + {"type": "bit", "name": "auto-delete", "default-value": false}, + {"type": "bit", "name": "internal", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "declare", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "declare-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "exchange"}, + {"type": "bit", "name": "if-unused", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "delete", + "synchronous" : true}, + {"id": 21, + "arguments": [], + "name": "delete-ok"}], + "name": "exchange" + }, + { + "id": 50, + "methods": [{"id": 10, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "passive", "default-value": false}, + {"type": "bit", "name": "durable", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "auto-delete", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "declare", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "shortstr", "name": "queue"}, + {"type": "long", "name": "message-count"}, + {"type": "long", "name": "consumer-count"}], + "name": "declare-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "bind", + "synchronous" : true}, + {"id": 21, + "arguments": [], + "name": "bind-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "purge", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "long", "name": "message-count"}], + "name": "purge-ok"}, + {"id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "if-unused", "default-value": false}, + {"type": "bit", "name": "if-empty", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "delete", + "synchronous" : true}, + {"id": 41, + "arguments": [{"type": "long", "name": "message-count"}], + "name": "delete-ok"}, + {"id": 50, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": 
"shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "unbind", + "synchronous" : true}, + {"id": 51, + "arguments": [], + "name": "unbind-ok"} + ], + "name": "queue" + }, + { + "id": 60, + "methods": [{"id": 10, + "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, + {"type": "short", "name": "prefetch-count", "default-value": 0}, + {"type": "bit", "name": "global", "default-value": false}], + "name": "qos", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "qos-ok"}, + {"id": 20, + "arguments": [{"domain": "access-ticket", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, + {"type": "bit", "name": "no-local", "default-value": false}, + {"type": "bit", "name": "no-ack", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "consume", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "consume-ok"}, + {"id": 30, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "cancel", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "cancel-ok"}, + {"content": true, + "id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "exchange", "default-value": ""}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "mandatory", "default-value": false}, + {"type": "bit", "name": "immediate", "default-value": false}], + "name": "publish"}, + {"content": true, + "id": 50, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "return"}, + {"content": true, + "id": 60, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "redelivered", "default-value": false}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "deliver"}, + {"id": 70, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "no-ack", "default-value": false}], + "name": "get", + "synchronous" : true}, + {"content": true, + "id": 71, + "arguments": [{"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "redelivered", "default-value": false}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}, + {"type": "long", "name": "message-count"}], + "name": "get-ok"}, + {"id": 72, + "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}], + "name": "get-empty"}, + {"id": 80, + "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, + {"type": "bit", "name": "multiple", "default-value": false}], + "name": "ack"}, + {"id": 90, + "arguments": [{"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "requeue", "default-value": true}], + "name": "reject"}, + 
{"id": 100, + "arguments": [{"type": "bit", "name": "requeue", "default-value": false}], + "name": "recover-async"}, + {"id": 110, + "arguments": [{"type": "bit", "name": "requeue", "default-value": false}], + "name": "recover", + "synchronous" : true}, + {"id": 111, + "arguments": [], + "name": "recover-ok"}], + "name": "basic", + "properties": [{"type": "shortstr", "name": "content-type"}, + {"type": "shortstr", "name": "content-encoding"}, + {"type": "table", "name": "headers"}, + {"type": "octet", "name": "delivery-mode"}, + {"type": "octet", "name": "priority"}, + {"type": "shortstr", "name": "correlation-id"}, + {"type": "shortstr", "name": "reply-to"}, + {"type": "shortstr", "name": "expiration"}, + {"type": "shortstr", "name": "message-id"}, + {"type": "timestamp", "name": "timestamp"}, + {"type": "shortstr", "name": "type"}, + {"type": "shortstr", "name": "user-id"}, + {"type": "shortstr", "name": "app-id"}, + {"type": "shortstr", "name": "cluster-id"}] + }, + { + "id": 70, + "methods": [{"id": 10, + "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, + {"type": "short", "name": "prefetch-count", "default-value": 0}, + {"type": "bit", "name": "global", "default-value": false}], + "name": "qos", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "qos-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, + {"type": "bit", "name": "no-local", "default-value": false}, + {"type": "bit", "name": "no-ack", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "consume", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "consume-ok"}, + {"id": 30, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "cancel", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "cancel-ok"}, + {"id": 40, + "arguments": [{"type": "shortstr", "name": "identifier"}, + {"type": "longlong", "name": "content-size"}], + "name": "open", + "synchronous" : true}, + {"id": 41, + "arguments": [{"type": "longlong", "name": "staged-size"}], + "name": "open-ok"}, + {"content": true, + "id": 50, + "arguments": [], + "name": "stage"}, + {"id": 60, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "exchange", "default-value": ""}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "mandatory", "default-value": false}, + {"type": "bit", "name": "immediate", "default-value": false}, + {"type": "shortstr", "name": "identifier"}], + "name": "publish"}, + {"content": true, + "id": 70, + "arguments": [{"type": "short", "name": "reply-code", "default-value": 200}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "return"}, + {"id": 80, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "redelivered", "default-value": false}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}, + {"type": 
"shortstr", "name": "identifier"}], + "name": "deliver"}, + {"id": 90, + "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, + {"type": "bit", "name": "multiple", "default-value": false}], + "name": "ack"}, + {"id": 100, + "arguments": [{"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "requeue", "default-value": true}], + "name": "reject"}], + "name": "file", + "properties": [{"type": "shortstr", "name": "content-type"}, + {"type": "shortstr", "name": "content-encoding"}, + {"type": "table", "name": "headers"}, + {"type": "octet", "name": "priority"}, + {"type": "shortstr", "name": "reply-to"}, + {"type": "shortstr", "name": "message-id"}, + {"type": "shortstr", "name": "filename"}, + {"type": "timestamp", "name": "timestamp"}, + {"type": "shortstr", "name": "cluster-id"}] + }, + { + "id": 80, + "methods": [{"id": 10, + "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, + {"type": "short", "name": "prefetch-count", "default-value": 0}, + {"type": "long", "name": "consume-rate", "default-value": 0}, + {"type": "bit", "name": "global", "default-value": false}], + "name": "qos", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "qos-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, + {"type": "bit", "name": "no-local", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "consume", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "consume-ok"}, + {"id": 30, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "cancel", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "cancel-ok"}, + {"content": true, + "id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}, + {"type": "shortstr", "name": "exchange", "default-value": ""}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "mandatory", "default-value": false}, + {"type": "bit", "name": "immediate", "default-value": false}], + "name": "publish"}, + {"content": true, + "id": 50, + "arguments": [{"type": "short", "name": "reply-code", "default-value": 200}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "return"}, + {"content": true, + "id": 60, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "longlong", "name": "delivery-tag"}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "queue"}], + "name": "deliver"}], + "name": "stream", + "properties": [{"type": "shortstr", "name": "content-type"}, + {"type": "shortstr", "name": "content-encoding"}, + {"type": "table", "name": "headers"}, + {"type": "octet", "name": "priority"}, + {"type": "timestamp", "name": "timestamp"}] + }, + { + "id": 90, + "methods": [{"id": 10, + "arguments": [], + "name": "select", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "select-ok"}, + {"id": 20, + "arguments": [], + "name": "commit", + "synchronous" : true}, + {"id": 21, + "arguments": [], + 
"name": "commit-ok"}, + {"id": 30, + "arguments": [], + "name": "rollback", + "synchronous" : true}, + {"id": 31, + "arguments": [], + "name": "rollback-ok"}], + "name": "tx" + }, + { + "id": 100, + "methods": [{"id": 10, + "arguments": [], + "name": "select", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "select-ok"}, + {"id": 20, + "arguments": [{"type": "shortstr", "name": "dtx-identifier"}], + "name": "start", + "synchronous" : true}, + {"id": 21, + "arguments": [], "name": "start-ok"}], + "name": "dtx" + }, + { + "id": 110, + "methods": [{"content": true, + "id": 10, + "arguments": [{"type": "table", "name": "meta-data"}], + "name": "request"}], + "name": "tunnel", + "properties": [{"type": "table", "name": "headers"}, + {"type": "shortstr", "name": "proxy-name"}, + {"type": "shortstr", "name": "data-name"}, + {"type": "octet", "name": "durable"}, + {"type": "octet", "name": "broadcast"}] + }, + { + "id": 120, + "methods": [{"id": 10, + "arguments": [{"type": "octet", "name": "integer-1"}, + {"type": "short", "name": "integer-2"}, + {"type": "long", "name": "integer-3"}, + {"type": "longlong", "name": "integer-4"}, + {"type": "octet", "name": "operation"}], + "name": "integer", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "longlong", "name": "result"}], + "name": "integer-ok"}, + {"id": 20, + "arguments": [{"type": "shortstr", "name": "string-1"}, + {"type": "longstr", "name": "string-2"}, + {"type": "octet", "name": "operation"}], + "name": "string", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "longstr", "name": "result"}], + "name": "string-ok"}, + {"id": 30, + "arguments": [{"type": "table", "name": "table"}, + {"type": "octet", "name": "integer-op"}, + {"type": "octet", "name": "string-op"}], + "name": "table", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "longlong", "name": "integer-result"}, + {"type": "longstr", "name": "string-result"}], + "name": "table-ok"}, + {"content": true, + "id": 40, + "arguments": [], + "name": "content", + "synchronous" : true}, + {"content": true, + "id": 41, + "arguments": [{"type": "long", "name": "content-checksum"}], + "name": "content-ok"}], + "name": "test" + } + ] +} diff -Nru rabbitmq-server-1.7.2/codegen/amqp-rabbitmq-0.9.1.json rabbitmq-server-2.3.1/codegen/amqp-rabbitmq-0.9.1.json --- rabbitmq-server-1.7.2/codegen/amqp-rabbitmq-0.9.1.json 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/amqp-rabbitmq-0.9.1.json 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,467 @@ +{ + "name": "AMQP", + "major-version": 0, + "minor-version": 9, + "revision": 1, + "port": 5672, + "copyright": [ + "Copyright (C) 2008-2011 VMware, Inc.\n", + "\n", + "Permission is hereby granted, free of charge, to any person\n", + "obtaining a copy of this file (the \"Software\"), to deal in the\n", + "Software without restriction, including without limitation the \n", + "rights to use, copy, modify, merge, publish, distribute, \n", + "sublicense, and/or sell copies of the Software, and to permit \n", + "persons to whom the Software is furnished to do so, subject to \n", + "the following conditions:\n", + "\n", + "The above copyright notice and this permission notice shall be\n", + "included in all copies or substantial portions of the Software.\n", + "\n", + "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n", + "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n", + "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n", + 
"NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n", + "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n", + "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n", + "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n", + "OTHER DEALINGS IN THE SOFTWARE.\n", + "\n", + "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n", + "Updated for 0-9-1 by Tony Garnock-Jones\n", + "\n", + "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n", + "http://www.twiststandards.org/index.php?option=com_docman&task=cat_view&gid=28&&Itemid=90\n", + "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n", + "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"], + + "domains": [ + ["bit", "bit"], + ["channel-id", "longstr"], + ["class-id", "short"], + ["consumer-tag", "shortstr"], + ["delivery-tag", "longlong"], + ["destination", "shortstr"], + ["duration", "longlong"], + ["exchange-name", "shortstr"], + ["long", "long"], + ["longlong", "longlong"], + ["longstr", "longstr"], + ["method-id", "short"], + ["no-ack", "bit"], + ["no-local", "bit"], + ["octet", "octet"], + ["offset", "longlong"], + ["path", "shortstr"], + ["peer-properties", "table"], + ["queue-name", "shortstr"], + ["redelivered", "bit"], + ["reference", "longstr"], + ["reject-code", "short"], + ["reject-text", "shortstr"], + ["reply-code", "short"], + ["reply-text", "shortstr"], + ["security-token", "longstr"], + ["short", "short"], + ["shortstr", "shortstr"], + ["table", "table"], + ["timestamp", "timestamp"] + ], + + "constants": [ + {"name": "FRAME-METHOD", "value": 1}, + {"name": "FRAME-HEADER", "value": 2}, + {"name": "FRAME-BODY", "value": 3}, + {"name": "FRAME-HEARTBEAT", "value": 8}, + {"name": "FRAME-MIN-SIZE", "value": 4096}, + {"name": "FRAME-END", "value": 206}, + {"name": "REPLY-SUCCESS", "value": 200}, + {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"}, + {"name": "NO-ROUTE", "value": 312, "class": "soft-error"}, + {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"}, + {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"}, + {"name": "NOT-FOUND", "value": 404, "class": "soft-error"}, + {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"}, + {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"}, + {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"}, + {"name": "INVALID-PATH", "value": 402, "class": "hard-error"}, + {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"}, + {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"}, + {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"}, + {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"}, + {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"}, + {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"}, + {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"}, + {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"}, + {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"} + ], + + "classes": [ + { + "id": 10, + "methods": [{"id": 10, + "arguments": [{"type": "octet", "name": "version-major", "default-value": 0}, + {"type": "octet", "name": "version-minor", "default-value": 9}, + {"domain": "peer-properties", "name": "server properties"}, + {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"}, + {"type": "longstr", "name": "locales", "default-value": "en_US"}], + "name": "start", + 
"synchronous" : true}, + {"id": 11, + "arguments": [{"domain": "peer-properties", "name": "client-properties"}, + {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"}, + {"type": "longstr", "name": "response"}, + {"type": "shortstr", "name": "locale", "default-value": "en_US"}], + "name": "start-ok"}, + {"id": 20, + "arguments": [{"type": "longstr", "name": "challenge"}], + "name": "secure", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "longstr", "name": "response"}], + "name": "secure-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, + {"type": "long", "name": "frame-max", "default-value": 0}, + {"type": "short", "name": "heartbeat", "default-value": 0}], + "name": "tune", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "short", "name": "channel-max", "default-value": 0}, + {"type": "long", "name": "frame-max", "default-value": 0}, + {"type": "short", "name": "heartbeat", "default-value": 0}], + "name": "tune-ok"}, + {"id": 40, + "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"}, + {"type": "shortstr", "name": "capabilities", "default-value": ""}, + {"type": "bit", "name": "insist", "default-value": false}], + "name": "open", + "synchronous" : true}, + {"id": 41, + "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}], + "name": "open-ok"}, + {"id": 50, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "short", "name": "class-id"}, + {"type": "short", "name": "method-id"}], + "name": "close", + "synchronous" : true}, + {"id": 51, + "arguments": [], + "name": "close-ok"}], + "name": "connection", + "properties": [] + }, + { + "id": 20, + "methods": [{"id": 10, + "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}], + "name": "open", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "longstr", "name": "channel-id", "default-value": ""}], + "name": "open-ok"}, + {"id": 20, + "arguments": [{"type": "bit", "name": "active"}], + "name": "flow", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "bit", "name": "active"}], + "name": "flow-ok"}, + {"id": 40, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "short", "name": "class-id"}, + {"type": "short", "name": "method-id"}], + "name": "close", + "synchronous" : true}, + {"id": 41, + "arguments": [], + "name": "close-ok"}], + "name": "channel" + }, + { + "id": 30, + "methods": [{"id": 10, + "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "passive", "default-value": true}, + {"type": "bit", "name": "active", "default-value": true}, + {"type": "bit", "name": "write", "default-value": true}, + {"type": "bit", "name": "read", "default-value": true}], + "name": "request", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "short", "name": "ticket", "default-value": 1}], + "name": "request-ok"}], + "name": "access" + }, + { + "id": 40, + "methods": [{"id": 10, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "type", "default-value": "direct"}, + {"type": "bit", "name": "passive", "default-value": false}, + {"type": "bit", "name": "durable", 
"default-value": false}, + {"type": "bit", "name": "auto-delete", "default-value": false}, + {"type": "bit", "name": "internal", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "declare", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "declare-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "exchange"}, + {"type": "bit", "name": "if-unused", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "delete", + "synchronous" : true}, + {"id": 21, + "arguments": [], + "name": "delete-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "destination"}, + {"type": "shortstr", "name": "source"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "bind", + "synchronous" : true}, + {"id": 31, + "arguments": [], + "name": "bind-ok"}, + {"id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "destination"}, + {"type": "shortstr", "name": "source"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "unbind", + "synchronous" : true}, + {"id": 51, + "arguments": [], + "name": "unbind-ok"}], + "name": "exchange" + }, + { + "id": 50, + "methods": [{"id": 10, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "passive", "default-value": false}, + {"type": "bit", "name": "durable", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "auto-delete", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "declare", + "synchronous" : true}, + {"id": 11, + "arguments": [{"type": "shortstr", "name": "queue"}, + {"type": "long", "name": "message-count"}, + {"type": "long", "name": "consumer-count"}], + "name": "declare-ok"}, + {"id": 20, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "bind", + "synchronous" : true}, + {"id": 21, + "arguments": [], + "name": "bind-ok"}, + {"id": 30, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "purge", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "long", "name": "message-count"}], + "name": "purge-ok"}, + {"id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "if-unused", "default-value": false}, + {"type": "bit", 
"name": "if-empty", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "delete", + "synchronous" : true}, + {"id": 41, + "arguments": [{"type": "long", "name": "message-count"}], + "name": "delete-ok"}, + {"id": 50, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "unbind", + "synchronous" : true}, + {"id": 51, + "arguments": [], + "name": "unbind-ok"} + ], + "name": "queue" + }, + { + "id": 60, + "methods": [{"id": 10, + "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0}, + {"type": "short", "name": "prefetch-count", "default-value": 0}, + {"type": "bit", "name": "global", "default-value": false}], + "name": "qos", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "qos-ok"}, + {"id": 20, + "arguments": [{"domain": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "shortstr", "name": "consumer-tag", "default-value": ""}, + {"type": "bit", "name": "no-local", "default-value": false}, + {"type": "bit", "name": "no-ack", "default-value": false}, + {"type": "bit", "name": "exclusive", "default-value": false}, + {"type": "bit", "name": "nowait", "default-value": false}, + {"type": "table", "name": "arguments", "default-value": {}}], + "name": "consume", + "synchronous" : true}, + {"id": 21, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "consume-ok"}, + {"id": 30, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "cancel", + "synchronous" : true}, + {"id": 31, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}], + "name": "cancel-ok"}, + {"content": true, + "id": 40, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "exchange", "default-value": ""}, + {"type": "shortstr", "name": "routing-key", "default-value": ""}, + {"type": "bit", "name": "mandatory", "default-value": false}, + {"type": "bit", "name": "immediate", "default-value": false}], + "name": "publish"}, + {"content": true, + "id": 50, + "arguments": [{"type": "short", "name": "reply-code"}, + {"type": "shortstr", "name": "reply-text", "default-value": ""}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "return"}, + {"content": true, + "id": 60, + "arguments": [{"type": "shortstr", "name": "consumer-tag"}, + {"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "redelivered", "default-value": false}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}], + "name": "deliver"}, + {"id": 70, + "arguments": [{"type": "short", "name": "ticket", "default-value": 0}, + {"type": "shortstr", "name": "queue", "default-value": ""}, + {"type": "bit", "name": "no-ack", "default-value": false}], + "name": "get", + "synchronous" : true}, + {"content": true, + "id": 71, + "arguments": [{"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "redelivered", "default-value": false}, + {"type": "shortstr", "name": "exchange"}, + {"type": "shortstr", "name": "routing-key"}, + {"type": "long", "name": "message-count"}], + "name": 
"get-ok"}, + {"id": 72, + "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}], + "name": "get-empty"}, + {"id": 80, + "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, + {"type": "bit", "name": "multiple", "default-value": false}], + "name": "ack"}, + {"id": 90, + "arguments": [{"type": "longlong", "name": "delivery-tag"}, + {"type": "bit", "name": "requeue", "default-value": true}], + "name": "reject"}, + {"id": 100, + "arguments": [{"type": "bit", "name": "requeue", "default-value": false}], + "name": "recover-async"}, + {"id": 110, + "arguments": [{"type": "bit", "name": "requeue", "default-value": false}], + "name": "recover", + "synchronous" : true}, + {"id": 111, + "arguments": [], + "name": "recover-ok"}, + {"id": 120, + "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0}, + {"type": "bit", "name": "multiple", "default-value": false}, + {"type": "bit", "name": "requeue", "default-value": true}], + "name": "nack"}], + "name": "basic", + "properties": [{"type": "shortstr", "name": "content-type"}, + {"type": "shortstr", "name": "content-encoding"}, + {"type": "table", "name": "headers"}, + {"type": "octet", "name": "delivery-mode"}, + {"type": "octet", "name": "priority"}, + {"type": "shortstr", "name": "correlation-id"}, + {"type": "shortstr", "name": "reply-to"}, + {"type": "shortstr", "name": "expiration"}, + {"type": "shortstr", "name": "message-id"}, + {"type": "timestamp", "name": "timestamp"}, + {"type": "shortstr", "name": "type"}, + {"type": "shortstr", "name": "user-id"}, + {"type": "shortstr", "name": "app-id"}, + {"type": "shortstr", "name": "cluster-id"}] + }, + { + "id": 90, + "methods": [{"id": 10, + "arguments": [], + "name": "select", + "synchronous" : true}, + {"id": 11, + "arguments": [], + "name": "select-ok"}, + {"id": 20, + "arguments": [], + "name": "commit", + "synchronous" : true}, + {"id": 21, + "arguments": [], + "name": "commit-ok"}, + {"id": 30, + "arguments": [], + "name": "rollback", + "synchronous" : true}, + {"id": 31, + "arguments": [], + "name": "rollback-ok"}], + "name": "tx" + }, + { + "id": 85, + "methods": [{"id": 10, + "arguments": [ + {"type": "bit", "name": "nowait", "default-value": false}], + "name": "select", + "synchronous": true}, + {"id": 11, + "arguments": [], + "name": "select-ok"}], + "name": "confirm" + } + ] +} diff -Nru rabbitmq-server-1.7.2/codegen/demo_extension.json rabbitmq-server-2.3.1/codegen/demo_extension.json --- rabbitmq-server-1.7.2/codegen/demo_extension.json 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/demo_extension.json 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "extension": { + "name": "demo", + "version": "1.0", + "copyright": "Copyright (C) 2009-2011 VMware, Inc." + }, + "domains": [ + ["foo-domain", "shortstr"] + ], + "constants": [ + {"name": "FOO-CONSTANT", "value": 121212} + ], + "classes": [ + {"name": "demo", + "id": 555, + "methods": [{"name": "one", "id": 1, "arguments": []}]} + ] +} diff -Nru rabbitmq-server-1.7.2/codegen/LICENSE-MPL-RabbitMQ rabbitmq-server-2.3.1/codegen/LICENSE-MPL-RabbitMQ --- rabbitmq-server-1.7.2/codegen/LICENSE-MPL-RabbitMQ 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/LICENSE-MPL-RabbitMQ 2011-02-03 12:47:36.000000000 +0000 @@ -446,28 +446,10 @@ The Original Code is RabbitMQ. - The Initial Developers of the Original Code are LShift Ltd, - Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
- - Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, - Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd - are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial - Technologies LLC, and Rabbit Technologies Ltd. - - Portions created by LShift Ltd are Copyright (C) 2007-2009 LShift - Ltd. Portions created by Cohesive Financial Technologies LLC are - Copyright (C) 2007-2009 Cohesive Financial Technologies - LLC. Portions created by Rabbit Technologies Ltd are Copyright - (C) 2007-2009 Rabbit Technologies Ltd. - - All Rights Reserved. - - Contributor(s): ______________________________________.'' + The Initial Developer of the Original Code is VMware, Inc. + Copyright (c) 2007-2011 VMware, Inc. All rights reserved.'' [NOTE: The text of this Exhibit A may differ slightly from the text of the notices in the Source Code files of the Original Code. You should use the text of this Exhibit A rather than the text found in the Original Code Source Code for Your Modifications.] - - - diff -Nru rabbitmq-server-1.7.2/codegen/README.extensions.md rabbitmq-server-2.3.1/codegen/README.extensions.md --- rabbitmq-server-1.7.2/codegen/README.extensions.md 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen/README.extensions.md 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,189 @@ +# Protocol extensions + +The `amqp_codegen.py` AMQP specification compiler has recently been +enhanced to take more than a single specification file, which allows +AMQP library authors to include extensions to the core protocol +without needing to modify the core AMQP specification file as +distributed. + +The compiler is invoked with the path to a single "main" specification +document and zero or more paths to "extension" documents. + +The order of the extensions matters: any later class property +definitions, for instance, are added to the list of definitions in +order of appearance. In general, composition of extensions with a core +specification document is therefore non-commutative. + +## The main document + +Written in the style of a +[json-shapes](http://github.com/tonyg/json-shapes) schema: + + DomainDefinition = _and(array_of(string()), array_length_equals(2)); + + ConstantDefinition = { + "name": string(), + "value": number(), + "class": optional(_or("soft-error", "hard-error")) + }; + + FieldDefinition = { + "name": string(), + "type": string(), + "default-value": optional(anything()) + }; + + MethodDefinition = { + "name": string(), + "id": number(), + "arguments": array_of(FieldDefinition), + "synchronous": optional(boolean()), + "content": optional(boolean()) + }; + + ClassDefinition = { + "name": string(), + "id": number(), + "methods": array_of(MethodDefinition), + "properties": optional(array_of(FieldDefinition)) + }; + + MainDocument = { + "major-version": number(), + "minor-version": number(), + "revision": optional(number()), + "port": number(), + "domains": array_of(DomainDefinition), + "constants": array_of(ConstantDefinition), + "classes": array_of(ClassDefinition), + } + +Within a `FieldDefinition`, the keyword `domain` can be used instead +of `type`, but `type` is preferred and `domain` is deprecated. + +Type names can either be a defined `domain` name or a built-in name +from the following list: + + - octet + - shortstr + - longstr + - short + - long + - longlong + - bit + - table + - timestamp + +Method and class IDs must be integers between 0 and 65535, +inclusive. 
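+For illustration only, here is a hypothetical, minimal main document
+that satisfies the shapes above; it is not one of the specification
+files shipped with the server, and the class and method names in it
+("example", "noop") are invented for this sketch:
+
+    {
+        "major-version": 0,
+        "minor-version": 9,
+        "revision": 1,
+        "port": 5672,
+        "domains": [["shortstr", "shortstr"]],
+        "constants": [{"name": "FRAME-END", "value": 206}],
+        "classes": [{"name": "example",
+                     "id": 999,
+                     "methods": [{"name": "noop", "id": 1,
+                                  "arguments": [], "synchronous": false}]}]
+    }
+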
Note that there is no specific subset of the space reserved +for experimental or site-local extensions, so be careful not to +conflict with IDs used by the AMQP core specification. + +If the `synchronous` field of a `MethodDefinition` is missing, it is +assumed to be `false`; the same applies to the `content` field. + +A `ConstantDefinition` with a `class` attribute is considered to be an +error-code definition; otherwise, it is considered to be a +straightforward numeric constant. + +## Extensions + +Written in the style of a +[json-shapes](http://github.com/tonyg/json-shapes) schema, and +referencing some of the type definitions given above: + + ExtensionDocument = { + "extension": anything(), + "domains": array_of(DomainDefinition), + "constants": array_of(ConstantDefinition), + "classes": array_of(ClassDefinition) + }; + +The `extension` keyword is used to describe the extension informally +for human readers. Typically it will be a dictionary, with members +such as: + + { + "name": "The name of the extension", + "version": "1.0", + "copyright": "Copyright (C) 1234 Yoyodyne, Inc." + } + +## Merge behaviour + +In the case of conflicts between values specified in the main document +and in any extension documents, type-specific merge operators are +invoked. + + - Any doubly-defined domain names are regarded as true + conflicts. Otherwise, all the domain definitions from all the main + and extension documents supplied to the compiler are merged into a + single dictionary. + + - Constant definitions are treated as per domain names above, + *mutatis mutandis*. + + - Classes and their methods are a little trickier: if an extension + defines a class with the same name as one previously defined, then + only the `methods` and `properties` fields of the extension's class + definition are attended to. + + - Any doubly-defined method names or property names within a class + are treated as true conflicts. + + - Properties defined in an extension are added to the end of the + extant property list for the class. + + (Extensions are of course permitted to define brand new classes as + well as to extend existing ones.) + + - Any other kind of conflict leads to a raised + `AmqpSpecFileMergeConflict` exception. + +## Invoking the spec compiler + +Your code generation code should invoke `amqp_codegen.do_main_dict` +with a dictionary of functions as the sole argument. Each will be +used for generating a separate file. The `do_main_dict` function +will parse the command-line arguments supplied when python was +invoked. + +The command-line will be parsed as: + + python your_codegen.py <action> <mainspec> [<extspec> ...] <output-file> + +where `<action>` is a key into the dictionary supplied to +`do_main_dict` and is used to select which generation function is +called. The `<mainspec>` and `<extspec>` arguments are file names of +specification documents containing expressions in the syntax given +above. The *final* argument on the command line, `<output-file>`, is the +name of the source-code file to generate. + +Here's a tiny example of the layout of a code generation module that +uses `amqp_codegen`: + + import amqp_codegen + + def generateHeader(specPath): + spec = amqp_codegen.AmqpSpec(specPath) + ... + + def generateImpl(specPath): + spec = amqp_codegen.AmqpSpec(specPath) + ...
+ + if __name__ == "__main__": + amqp_codegen.do_main_dict({"header": generateHeader, + "body": generateImpl}) + +The reasons for allowing more than one action, are that + + - many languages have separate "header"-type files (C and Erlang, to + name two) + - `Makefile`s often require separate rules for generating the two + kinds of file, but it's convenient to keep the generation code + together in a single python module + +The main reason things are laid out this way, however, is simply that +it's an accident of the history of the code. We may change the API to +`amqp_codegen` in future to clean things up a little. diff -Nru rabbitmq-server-1.7.2/codegen.py rabbitmq-server-2.3.1/codegen.py --- rabbitmq-server-1.7.2/codegen.py 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/codegen.py 2011-02-03 12:47:36.000000000 +0000 @@ -1,32 +1,17 @@ -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## The Original Code is RabbitMQ. +## The Original Code is RabbitMQ. ## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## from __future__ import nested_scopes @@ -75,6 +60,8 @@ AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' 
+ erlangize(m.name) + "'" +AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'" + def erlangConstantName(s): return '_'.join(re.split('[- ]', s.upper())) @@ -93,40 +80,46 @@ def full(self): return self.count() == 8 +def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4): + r = [prologue] + i = 0 + for t in things: + if i != 0: + if i % thingsPerLine == 0: + r += [lineSeparator] + else: + r += [separator] + r += [t] + i += 1 + r += [epilogue] + return "".join(r) + +def prettyType(typeName, subTypes, typesPerLine = 4): + """Pretty print a type signature made up of many alternative subtypes""" + sTs = multiLineFormat(subTypes, + "( ", " | ", "\n | ", " )", + thingsPerLine = typesPerLine) + return "-type(%s ::\n %s)." % (typeName, sTs) + def printFileHeader(): print """%% Autogenerated code. Do not edit. %% -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% The Original Code is RabbitMQ. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. +%% The Original Code is RabbitMQ. %% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. 
%%""" - + def genErl(spec): def erlType(domain): return erlangTypeMap[spec.resolveDomain(domain)] @@ -146,12 +139,15 @@ def genLookupMethodName(m): print "lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName()) + def genLookupClassName(c): + print "lookup_class_name(%d) -> %s;" % (c.index, c.erlangName()) + def genMethodId(m): print "method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index) def genMethodHasContent(m): print "method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower()) - + def genMethodIsSynchronous(m): hasNoWait = "nowait" in fieldNameList(m.arguments) if m.isSynchronous and hasNoWait: @@ -214,11 +210,12 @@ elif type == 'table': print " F%d = rabbit_binary_parser:parse_table(F%dTab)," % \ (f.index, f.index) - elif type == 'shortstr': - print " if F%dLen > 255 -> exit(method_field_shortstr_overflow); true -> ok end," % (f.index) else: pass + def genMethodRecord(m): + print "method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName()) + def genDecodeMethodFields(m): packedFields = packMethodFields(m.arguments) binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields]) @@ -249,8 +246,7 @@ print " F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index) print " F%dLen = size(F%dTab)," % (f.index, f.index) elif type == 'shortstr': - print " F%dLen = size(F%d)," % (f.index, f.index) - print " if F%dLen > 255 -> exit(method_field_shortstr_overflow); true -> ok end," % (f.index) + print " F%dLen = shortstr_size(F%d)," % (f.index, f.index) elif type == 'longstr': print " F%dLen = size(F%d)," % (f.index, f.index) else: @@ -291,14 +287,22 @@ methods = spec.allMethods() printFileHeader() - print """-module(rabbit_framing). --include("rabbit_framing.hrl"). + module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor) + if spec.revision != 0: + module = "%s_%d" % (module, spec.revision) + if module == "rabbit_framing_amqp_8_0": + module = "rabbit_framing_amqp_0_8" + print "-module(%s)." % module + print """-include("rabbit_framing.hrl"). +-export([version/0]). -export([lookup_method_name/1]). +-export([lookup_class_name/1]). -export([method_id/1]). -export([method_has_content/1]). -export([is_method_synchronous/1]). +-export([method_record/1]). -export([method_fieldnames/1]). -export([decode_method_fields/2]). -export([decode_properties/2]). @@ -307,13 +311,107 @@ -export([lookup_amqp_exception/1]). -export([amqp_exception/1]). +""" + print "%% Various types" + print "-ifdef(use_specs)." + + print """-export_type([amqp_field_type/0, amqp_property_type/0, + amqp_table/0, amqp_array/0, amqp_value/0, + amqp_method_name/0, amqp_method/0, amqp_method_record/0, + amqp_method_field_name/0, amqp_property_record/0, + amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]). + +-type(amqp_field_type() :: + 'longstr' | 'signedint' | 'decimal' | 'timestamp' | + 'table' | 'byte' | 'double' | 'float' | 'long' | + 'short' | 'bool' | 'binary' | 'void'). +-type(amqp_property_type() :: + 'shortstr' | 'longstr' | 'octet' | 'shortint' | 'longint' | + 'longlongint' | 'timestamp' | 'bit' | 'table'). + +-type(amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]). +-type(amqp_array() :: [{amqp_field_type(), amqp_value()}]). 
+-type(amqp_value() :: binary() | % longstr + integer() | % signedint + {non_neg_integer(), non_neg_integer()} | % decimal + amqp_table() | + amqp_array() | + byte() | % byte + float() | % double + integer() | % long + integer() | % short + boolean() | % bool + binary() | % binary + 'undefined' | % void + non_neg_integer() % timestamp + ). +""" + + print prettyType("amqp_method_name()", + [m.erlangName() for m in methods]) + print prettyType("amqp_method()", + ["{%s, %s}" % (m.klass.index, m.index) for m in methods], + 6) + print prettyType("amqp_method_record()", + ["#%s{}" % (m.erlangName()) for m in methods]) + fieldNames = set() + for m in methods: + fieldNames.update(m.arguments) + fieldNames = [erlangize(f.name) for f in fieldNames] + print prettyType("amqp_method_field_name()", + fieldNames) + print prettyType("amqp_property_record()", + ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()]) + print prettyType("amqp_exception()", + ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants]) + print prettyType("amqp_exception_code()", + ["%i" % v for (c, v, cls) in spec.constants]) + classIds = set() + for m in spec.allMethods(): + classIds.add(m.klass.index) + print prettyType("amqp_class_id()", + ["%i" % ci for ci in classIds]) + print "-endif. % use_specs" + + print """ +%% Method signatures +-ifdef(use_specs). +-spec(version/0 :: () -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}). +-spec(lookup_method_name/1 :: (amqp_method()) -> amqp_method_name()). +-spec(method_id/1 :: (amqp_method_name()) -> amqp_method()). +-spec(method_has_content/1 :: (amqp_method_name()) -> boolean()). +-spec(is_method_synchronous/1 :: (amqp_method_record()) -> boolean()). +-spec(method_record/1 :: (amqp_method_name()) -> amqp_method_record()). +-spec(method_fieldnames/1 :: (amqp_method_name()) -> [amqp_method_field_name()]). +-spec(decode_method_fields/2 :: + (amqp_method_name(), binary()) -> amqp_method_record() | rabbit_types:connection_exit()). +-spec(decode_properties/2 :: (non_neg_integer(), binary()) -> amqp_property_record()). +-spec(encode_method_fields/1 :: (amqp_method_record()) -> binary()). +-spec(encode_properties/1 :: (amqp_property_record()) -> binary()). +-spec(lookup_amqp_exception/1 :: (amqp_exception()) -> {boolean(), amqp_exception_code(), binary()}). +-spec(amqp_exception/1 :: (amqp_exception_code()) -> amqp_exception()). +-endif. % use_specs + bitvalue(true) -> 1; bitvalue(false) -> 0; bitvalue(undefined) -> 0. + +shortstr_size(S) -> + case size(S) of + Len when Len =< 255 -> Len; + _ -> exit(method_field_shortstr_overflow) + end. """ + version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision) + if version == '{8, 0, 0}': version = '{0, 8, 0}' + print "version() -> %s." % (version) + for m in methods: genLookupMethodName(m) print "lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id})." + for c in spec.allClasses(): genLookupClassName(c) + print "lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId})." + for m in methods: genMethodId(m) print "method_id(Name) -> exit({unknown_method_name, Name})." @@ -323,6 +421,9 @@ for m in methods: genMethodIsSynchronous(m) print "is_method_synchronous(Name) -> exit({unknown_method_name, Name})." + for m in methods: genMethodRecord(m) + print "method_record(Name) -> exit({unknown_method_name, Name})." + for m in methods: genMethodFieldNames(m) print "method_fieldnames(Name) -> exit({unknown_method_name, Name})." 
@@ -362,12 +463,10 @@ result += ' = ' + conv_fn(field.defaultvalue) return result return ', '.join([fillField(f) for f in fields]) - + methods = spec.allMethods() printFileHeader() - print "-define(PROTOCOL_VERSION_MAJOR, %d)." % (spec.major) - print "-define(PROTOCOL_VERSION_MINOR, %d)." % (spec.minor) print "-define(PROTOCOL_PORT, %d)." % (spec.port) for (c,v,cls) in spec.constants: @@ -381,12 +480,14 @@ for c in spec.allClasses(): print "-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields)) + def generateErl(specPath): genErl(AmqpSpec(specPath)) def generateHrl(specPath): genHrl(AmqpSpec(specPath)) - + if __name__ == "__main__": - do_main(generateHrl, generateErl) + do_main_dict({"header": generateHrl, + "body": generateErl}) diff -Nru rabbitmq-server-1.7.2/debian/changelog rabbitmq-server-2.3.1/debian/changelog --- rabbitmq-server-1.7.2/debian/changelog 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/changelog 2011-07-06 08:36:57.000000000 +0000 @@ -1,16 +1,58 @@ -rabbitmq-server (1.7.2-1ubuntu1) lucid; urgency=low +rabbitmq-server (2.3.1-1ubuntu1~lucid1~ppa1) lucid; urgency=low - * debian/preinst, debian/rabbitmq-server.templates: Warn user about upgrading from a really old version. - (LP: #506985) - * debian/control: Update maintainer according to spec. + * Backport to Lucid. No source changes. - -- Chuck Short Tue, 30 Mar 2010 23:10:55 -0400 + -- William Grant Wed, 06 Jul 2011 18:27:10 +1000 -rabbitmq-server (1.7.2-1) unstable; urgency=low +rabbitmq-server (2.3.1-1ubuntu1) natty; urgency=low + + * fix rabbitmq-stomp FTBFS, which requires erlang libraries in a + rabbit-common specific location; solve with a symlink + + -- Dustin Kirkland Tue, 19 Apr 2011 15:32:28 -0400 + +rabbitmq-server (2.3.1-1) unstable; urgency=low + + * New upstream release, closes: #611253 + + -- John Leuner Sat, 05 Feb 2011 10:21:16 +0200 + +rabbitmq-server (2.2.0-1) unstable; urgency=low + + * New upstream release + + -- John Leuner Thu, 02 Dec 2010 20:41:53 +0200 + +rabbitmq-server (2.1.0-1) unstable; urgency=low + + * New upstream release + + -- John Leuner Mon, 27 Sep 2010 20:28:06 +0200 + +rabbitmq-server (2.0.0-2) unstable; urgency=low + + * Fix various scripts that were not updated correctly in + - the 2.0.0-1 package, closes: #594724 + + -- John Leuner Thu, 02 Sep 2010 18:01:37 +0200 + +rabbitmq-server (2.0.0-1) unstable; urgency=low + + * New upstream release + + -- John Leuner Sat, 28 Aug 2010 11:21:48 +0200 + +rabbitmq-server (1.8.1-1) unstable; urgency=low + + * New upstream release + + -- John Leuner Sun, 01 Aug 2010 15:47:46 +0200 + +rabbitmq-server (1.8.0-1) unstable; urgency=low * New upstream release - -- John Leuner Fri, 19 Feb 2010 17:30:57 +0200 + -- John Leuner Thu, 24 Jun 2010 18:43:04 +0200 rabbitmq-server (1.7.0-3) unstable; urgency=low diff -Nru rabbitmq-server-1.7.2/debian/control rabbitmq-server-2.3.1/debian/control --- rabbitmq-server-1.7.2/debian/control 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/control 2011-07-06 08:36:57.000000000 +0000 @@ -1,14 +1,17 @@ Source: rabbitmq-server Section: net Priority: extra -Maintainer: Ubuntu Core Developers +Maintainer: Ubuntu Developers XSBC-Original-Maintainer: John Leuner -Build-Depends: cdbs, debhelper (>= 5), erlang-dev, python-simplejson +Build-Depends: cdbs, debhelper (>= 5), erlang-dev, python-simplejson, xmlto, xsltproc Standards-Version: 3.8.0 Package: rabbitmq-server Architecture: all -Depends: erlang-base | erlang-base-hipe, erlang-ssl | erlang-nox (<< 1:13.b-dfsg1-1), 
erlang-os-mon | erlang-nox (<< 1:13.b-dfsg1-1), erlang-mnesia | erlang-nox (<< 1:13.b-dfsg1-1), adduser, logrotate, ${misc:Depends} +# erlang-inets is not a strict dependency, but it's needed to allow +# the installation of plugins that use mochiweb. Ideally it would be a +# "Recommends" instead, but gdebi does not install those. +Depends: erlang-base (>= 1:12.b.3) | erlang-base-hipe (>= 1:12.b.3), erlang-ssl | erlang-nox (<< 1:13.b-dfsg1-1), erlang-os-mon | erlang-nox (<< 1:13.b-dfsg1-1), erlang-mnesia | erlang-nox (<< 1:13.b-dfsg1-1), erlang-inets | erlang-nox (<< 1:13.b-dfsg1-1), adduser, logrotate, ${misc:Depends} Description: An AMQP server written in Erlang RabbitMQ is an implementation of AMQP, the emerging standard for high performance enterprise messaging. The RabbitMQ server is a robust and diff -Nru rabbitmq-server-1.7.2/debian/copyright rabbitmq-server-2.3.1/debian/copyright --- rabbitmq-server-1.7.2/debian/copyright 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/copyright 2011-07-06 08:36:57.000000000 +0000 @@ -3,23 +3,23 @@ It was downloaded from http://www.rabbitmq.com/ -The file codegen/amqp-0.8.json is covered by the following terms: +The files codegen/amqp-rabbitmq-0.8.json and +codegen/amqp-rabbitmq-0.9.1.json are covered by the following terms: + + "Copyright (C) 2008-2011 VMware, Inc. - "Copyright (C) 2008-2010 LShift Ltd, Cohesive Financial Technologies LLC, - and Rabbit Technologies Ltd - Permission is hereby granted, free of charge, to any person obtaining a copy of this file (the Software), to deal in the - Software without restriction, including without limitation the - rights to use, copy, modify, merge, publish, distribute, - sublicense, and/or sell copies of the Software, and to permit - persons to whom the Software is furnished to do so, subject to + Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to permit + persons to whom the Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, + + THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT @@ -31,19 +31,8 @@ The rest of this package is licensed under the Mozilla Public License 1.1 Authors and Copyright are as described below: - The Initial Developers of the Original Code are LShift Ltd, - Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. - - Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, - Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd - are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial - Technologies LLC, and Rabbit Technologies Ltd. - - Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift - Ltd. Portions created by Cohesive Financial Technologies LLC are - Copyright (C) 2007-2010 Cohesive Financial Technologies - LLC. Portions created by Rabbit Technologies Ltd are Copyright - (C) 2007-2010 Rabbit Technologies Ltd. + The Initial Developer of the Original Code is VMware, Inc. + Copyright (c) 2007-2011 VMware, Inc. 
All rights reserved. MOZILLA PUBLIC LICENSE @@ -494,23 +483,8 @@ The Original Code is RabbitMQ. - The Initial Developers of the Original Code are LShift Ltd, - Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. - - Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, - Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd - are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial - Technologies LLC, and Rabbit Technologies Ltd. - - Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift - Ltd. Portions created by Cohesive Financial Technologies LLC are - Copyright (C) 2007-2010 Cohesive Financial Technologies - LLC. Portions created by Rabbit Technologies Ltd are Copyright - (C) 2007-2010 Rabbit Technologies Ltd. - - All Rights Reserved. - - Contributor(s): ______________________________________.'' + The Initial Developer of the Original Code is VMware, Inc. + Copyright (c) 2007-2011 VMware, Inc. All rights reserved.'' [NOTE: The text of this Exhibit A may differ slightly from the text of the notices in the Source Code files of the Original Code. You should @@ -521,10 +495,8 @@ -If you have any questions regarding licensing, please contact us at +If you have any questions regarding licensing, please contact us at info@rabbitmq.com. -The Debian packaging is (C) 2007-2010, Rabbit Technologies Ltd. -and is licensed under the MPL 1.1, see above. - - +The Debian packaging is (C) 2007-2011, VMware, Inc. and is licensed +under the MPL 1.1, see above. diff -Nru rabbitmq-server-1.7.2/debian/links rabbitmq-server-2.3.1/debian/links --- rabbitmq-server-1.7.2/debian/links 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/links 2011-07-06 08:36:57.000000000 +0000 @@ -0,0 +1 @@ +usr/lib/rabbitmq/lib/rabbitmq_server-2.3.1 usr/lib/erlang/lib/rabbit_common diff -Nru rabbitmq-server-1.7.2/debian/postinst rabbitmq-server-2.3.1/debian/postinst --- rabbitmq-server-1.7.2/debian/postinst 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/postinst 2011-07-06 08:36:57.000000000 +0000 @@ -26,7 +26,8 @@ # create rabbitmq user if ! 
getent passwd rabbitmq >/dev/null; then adduser --system --ingroup rabbitmq --home /var/lib/rabbitmq \ - --no-create-home --gecos "RabbitMQ messaging server" rabbitmq + --no-create-home --gecos "RabbitMQ messaging server" \ + --disabled-login rabbitmq fi chown -R rabbitmq:rabbitmq /var/lib/rabbitmq diff -Nru rabbitmq-server-1.7.2/debian/postrm rabbitmq-server-2.3.1/debian/postrm --- rabbitmq-server-1.7.2/debian/postrm 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/postrm 2011-07-06 08:36:57.000000000 +0000 @@ -0,0 +1,73 @@ +#!/bin/sh +# postrm script for rabbitmq +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + +remove_plugin_traces() { + # Remove traces of plugins + rm -rf /var/lib/rabbitmq/plugins-scratch +} + +case "$1" in + purge) + rm -f /etc/default/rabbitmq + if [ -d /var/lib/rabbitmq ]; then + rm -r /var/lib/rabbitmq + fi + if [ -d /var/log/rabbitmq ]; then + rm -r /var/log/rabbitmq + fi + if [ -d /var/run/rabbitmq ]; then + rm -r /var/run/rabbitmq + fi + if [ -d /etc/rabbitmq ]; then + rm -r /etc/rabbitmq + fi + remove_plugin_traces + if getent passwd rabbitmq >/dev/null; then + # Stop epmd if run by the rabbitmq user + pkill -u rabbitmq epmd || : + + deluser rabbitmq + fi + if getent group rabbitmq >/dev/null; then + delgroup rabbitmq + fi + ;; + + remove|upgrade) + remove_plugin_traces + ;; + + failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff -Nru rabbitmq-server-1.7.2/debian/postrm.in rabbitmq-server-2.3.1/debian/postrm.in --- rabbitmq-server-1.7.2/debian/postrm.in 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/postrm.in 2011-07-06 08:36:57.000000000 +0000 @@ -18,6 +18,10 @@ # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package +remove_plugin_traces() { + # Remove traces of plugins + rm -rf /var/lib/rabbitmq/plugins-scratch +} case "$1" in purge) @@ -34,11 +38,7 @@ if [ -d /etc/rabbitmq ]; then rm -r /etc/rabbitmq fi - # Remove traces of plugins - rm -rf @RABBIT_LIB@/priv @RABBIT_LIB@/plugins - for ext in rel script boot ; do - rm -f @RABBIT_LIB@/ebin/rabbit.$ext - done + remove_plugin_traces if getent passwd rabbitmq >/dev/null; then # Stop epmd if run by the rabbitmq user pkill -u rabbitmq epmd || : @@ -50,7 +50,11 @@ fi ;; - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) + remove|upgrade) + remove_plugin_traces + ;; + + failed-upgrade|abort-install|abort-upgrade|disappear) ;; *) diff -Nru rabbitmq-server-1.7.2/debian/preinst rabbitmq-server-2.3.1/debian/preinst --- rabbitmq-server-1.7.2/debian/preinst 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/preinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -. 
/usr/share/debconf/confmodule - -if dpkg --compare-versions "$2" lt 1.6.0; then - db_version 2.0 - - db_input high rabbitmq-server/upgrade_previous || true - db_go || true -fi - -#DEBHELPER# diff -Nru rabbitmq-server-1.7.2/debian/rabbitmq-asroot-script-wrapper rabbitmq-server-2.3.1/debian/rabbitmq-asroot-script-wrapper --- rabbitmq-server-1.7.2/debian/rabbitmq-asroot-script-wrapper 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/rabbitmq-asroot-script-wrapper 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -#!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ -## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. -## -## The Original Code is RabbitMQ. -## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## - -cd /var/lib/rabbitmq - -SCRIPT=`basename $0` - -if [ `id -u` = 0 ] ; then - /usr/lib/rabbitmq/bin/${SCRIPT} "$@" -else - echo - echo "Only root should run ${SCRIPT}" - echo - exit 1 -fi - diff -Nru rabbitmq-server-1.7.2/debian/rabbitmq-script-wrapper rabbitmq-server-2.3.1/debian/rabbitmq-script-wrapper --- rabbitmq-server-1.7.2/debian/rabbitmq-script-wrapper 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/rabbitmq-script-wrapper 2011-07-06 08:36:57.000000000 +0000 @@ -1,33 +1,18 @@ #!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## The Original Code is RabbitMQ. ## -## The Original Code is RabbitMQ. 
-## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## # Escape spaces and quotes, because shell is revolting. @@ -55,4 +40,3 @@ echo exit 1 fi - diff -Nru rabbitmq-server-1.7.2/debian/rabbitmq-server.ocf rabbitmq-server-2.3.1/debian/rabbitmq-server.ocf --- rabbitmq-server-1.7.2/debian/rabbitmq-server.ocf 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/rabbitmq-server.ocf 2011-07-06 08:36:57.000000000 +0000 @@ -0,0 +1,343 @@ +#!/bin/sh +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. +## +## The Original Code is RabbitMQ. +## +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +## + +## +## OCF Resource Agent compliant rabbitmq-server resource script. +## + +## OCF instance parameters +## OCF_RESKEY_multi +## OCF_RESKEY_ctl +## OCF_RESKEY_nodename +## OCF_RESKEY_ip +## OCF_RESKEY_port +## OCF_RESKEY_config_file +## OCF_RESKEY_log_base +## OCF_RESKEY_mnesia_base +## OCF_RESKEY_server_start_args + +####################################################################### +# Initialization: + +: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} +. 
${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs + +####################################################################### + +OCF_RESKEY_multi_default="/usr/sbin/rabbitmq-multi" +OCF_RESKEY_ctl_default="/usr/sbin/rabbitmqctl" +OCF_RESKEY_nodename_default="rabbit@localhost" +OCF_RESKEY_log_base_default="/var/log/rabbitmq" +: ${OCF_RESKEY_multi=${OCF_RESKEY_multi_default}} +: ${OCF_RESKEY_ctl=${OCF_RESKEY_ctl_default}} +: ${OCF_RESKEY_nodename=${OCF_RESKEY_nodename_default}} +: ${OCF_RESKEY_log_base=${OCF_RESKEY_log_base_default}} + +meta_data() { + cat < + + +1.0 + + +Resource agent for RabbitMQ-server + + +Resource agent for RabbitMQ-server + + + + +The path to the rabbitmq-multi script + +Path to rabbitmq-multi + + + + + +The path to the rabbitmqctl script + +Path to rabbitmqctl + + + + + +The node name for rabbitmq-server + +Node name + + + + + +The IP address for rabbitmq-server to listen on + +IP Address + + + + + +The IP Port for rabbitmq-server to listen on + +IP Port + + + + + +Location of the config file + +Config file path + + + + + +Location of the directory under which logs will be created + +Log base path + + + + + +Location of the directory under which mnesia will store data + +Mnesia base path + + + + + +Additional arguments provided to the server on startup + +Server start arguments + + + + + + + + + + + + + + +END +} + +rabbit_usage() { + cat < /dev/null 2> /dev/null + rc=$? + case "$rc" in + 0) + ocf_log debug "RabbitMQ server is running normally" + return $OCF_SUCCESS + ;; + 2) + ocf_log debug "RabbitMQ server is not running" + return $OCF_NOT_RUNNING + ;; + *) + ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG status: $rc" + exit $OCF_ERR_GENERIC + esac +} + +rabbit_start() { + local rc + + if rabbit_status; then + ocf_log info "Resource already running." + return $OCF_SUCCESS + fi + + export_vars + + $RABBITMQ_MULTI start_all 1 > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err & + rc=$? + + if [ "$rc" != 0 ]; then + ocf_log err "rabbitmq-server start command failed: $RABBITMQ_MULTI start_all 1, $rc" + return $rc + fi + + # Spin waiting for the server to come up. + # Let the CRM/LRM time us out if required + start_wait=1 + while [ $start_wait = 1 ]; do + rabbit_status + rc=$? + if [ "$rc" = $OCF_SUCCESS ]; then + start_wait=0 + elif [ "$rc" != $OCF_NOT_RUNNING ]; then + ocf_log info "rabbitmq-server start failed: $rc" + exit $OCF_ERR_GENERIC + fi + sleep 1 + done + + return $OCF_SUCCESS +} + +rabbit_stop() { + local rc + + if ! rabbit_status; then + ocf_log info "Resource not running." + return $OCF_SUCCESS + fi + + $RABBITMQ_MULTI stop_all & + rc=$? + + if [ "$rc" != 0 ]; then + ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_MULTI stop_all, $rc" + return $rc + fi + + # Spin waiting for the server to shut down. + # Let the CRM/LRM time us out if required + stop_wait=1 + while [ $stop_wait = 1 ]; do + rabbit_status + rc=$? + if [ "$rc" = $OCF_NOT_RUNNING ]; then + stop_wait=0 + break + elif [ "$rc" != $OCF_SUCCESS ]; then + ocf_log info "rabbitmq-server stop failed: $rc" + exit $OCF_ERR_GENERIC + fi + sleep 1 + done + + return $OCF_SUCCESS +} + +rabbit_monitor() { + rabbit_status + return $? 
+} + +case $__OCF_ACTION in + meta-data) + meta_data + exit $OCF_SUCCESS + ;; + usage|help) + rabbit_usage + exit $OCF_SUCCESS + ;; +esac + +if ocf_is_probe; then + rabbit_validate_partial +else + rabbit_validate_full +fi + +case $__OCF_ACTION in + start) + rabbit_start + ;; + stop) + rabbit_stop + ;; + status|monitor) + rabbit_monitor + ;; + validate-all) + exit $OCF_SUCCESS + ;; + *) + rabbit_usage + exit $OCF_ERR_UNIMPLEMENTED + ;; +esac + +exit $? diff -Nru rabbitmq-server-1.7.2/debian/rabbitmq-server.templates rabbitmq-server-2.3.1/debian/rabbitmq-server.templates --- rabbitmq-server-1.7.2/debian/rabbitmq-server.templates 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/rabbitmq-server.templates 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -Template: rabbitmq-server/upgrade_previous -Type: note -Description: Upgrading from 1.5.4 and below. - You are upgrading from a release prior to 1.6.0. When the - RabbitMQ server detects the presence of an old database, it moves it - to a backup location, creates a fresh, empty database, and logs a - warning. If your RabbitMQ installation contains important data then we - recommend you contact support@rabbitmq.com for assistance with the - upgrade. diff -Nru rabbitmq-server-1.7.2/debian/rules rabbitmq-server-2.3.1/debian/rules --- rabbitmq-server-1.7.2/debian/rules 2011-07-06 08:36:56.000000000 +0000 +++ rabbitmq-server-2.3.1/debian/rules 2011-07-06 08:36:57.000000000 +0000 @@ -13,11 +13,9 @@ install/rabbitmq-server:: mkdir -p $(DOCDIR) - rm $(RABBIT_LIB)LICENSE* + rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL* for script in rabbitmqctl rabbitmq-server rabbitmq-multi; do \ install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \ done - for script in rabbitmq-activate-plugins rabbitmq-deactivate-plugins; do \ - install -p -D -m 0755 debian/rabbitmq-asroot-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \ - done sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' debian/postrm + install -p -D -m 0755 debian/rabbitmq-server.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server diff -Nru rabbitmq-server-1.7.2/docs/examples-to-end.xsl rabbitmq-server-2.3.1/docs/examples-to-end.xsl --- rabbitmq-server-1.7.2/docs/examples-to-end.xsl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/examples-to-end.xsl 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + Examples + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + [] + + + + {} + + + + + + + + diff -Nru rabbitmq-server-1.7.2/docs/html-to-website-xml.xsl rabbitmq-server-2.3.1/docs/html-to-website-xml.xsl --- rabbitmq-server-1.7.2/docs/html-to-website-xml.xsl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/html-to-website-xml.xsl 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,98 @@ + + + + + + + + + + + + + + + + + + + + + + +type="text/xml" href="page.xsl" + + + <xsl:value-of select="document($original)/refentry/refnamediv/refname"/><xsl:if test="document($original)/refentry/refmeta/manvolnum">(<xsl:value-of select="document($original)/refentry/refmeta/manvolnum"/>)</xsl:if> manual page + + + + + +

+          This is the manual page for
+          ().
+
+          See a list of all manual pages.
+
+          This is the documentation for
+          .
+
+          For more general documentation, please see the
+          administrator's guide.
+
+        Table of Contents
+
+ diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-activate-plugins.1.pod rabbitmq-server-2.3.1/docs/rabbitmq-activate-plugins.1.pod --- rabbitmq-server-1.7.2/docs/rabbitmq-activate-plugins.1.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-activate-plugins.1.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -=head1 NAME - -rabbitmq-activate-plugins - command line tool for activating plugins -in a RabbitMQ broker - -=head1 SYNOPSIS - -rabbitmq-activate-plugins - -=head1 DESCRIPTION - -RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - -rabbitmq-activate-plugins is a command line tool for activating -plugins installed into the broker's plugins directory. - -=head1 EXAMPLES - -To activate all of the installed plugins in the current RabbitMQ install, -execute: - - rabbitmq-activate-plugins - -=head1 SEE ALSO - -L, L, L, -L, L - -=head1 AUTHOR - -The RabbitMQ Team - -=head1 REFERENCES - -RabbitMQ Web Site: L diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq.conf.5.pod rabbitmq-server-2.3.1/docs/rabbitmq.conf.5.pod --- rabbitmq-server-1.7.2/docs/rabbitmq.conf.5.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq.conf.5.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -=head1 NAME - -F - default settings for RabbitMQ AMQP -server - -=head1 DESCRIPTION - -F contains variable settings that override the -defaults built in to the RabbitMQ startup scripts. - -The file is interpreted by the system shell, and so should consist of -a sequence of shell environment variable definitions. Normal shell -syntax is permitted (since the file is sourced using the shell "." -operator), including line comments starting with "#". - -In order of preference, the startup scripts get their values from the -environment, from F and finally from the -built-in default values. For example, for the B -setting, - -=over - -=item B - -from the environment is checked first. If it is absent or equal to the -empty string, then - -=item B - -from L is checked. If it is also absent -or set equal to the empty string then the default value from the -startup script is used. - -The variable names in /etc/rabbitmq/rabbitmq.conf are always equal to the -environment variable names, with the B prefix removed: -B from the environment becomes B in the -F file, etc. - -=back - -=head1 EXAMPLES - -The following is an example of a complete -F file that overrides the default Erlang -node name from "rabbit" to "hare": - - # I am a complete /etc/rabbitmq/rabbitmq.conf file. - # Comment lines start with a hash character. - # This is a /bin/sh script file - use ordinary envt var syntax - NODENAME=hare - -=head1 SEE ALSO - -L, L, L - -=head1 AUTHOR - -Originally written by The RabbitMQ Team - -=head1 COPYRIGHT - -This package, the RabbitMQ server is licensed under the MPL. - -If you have any questions regarding licensing, please contact us at -info@rabbitmq.com. 
- -=head1 REFERENCES - -RabbitMQ Web Site: L diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq.conf.5.xml rabbitmq-server-2.3.1/docs/rabbitmq.conf.5.xml --- rabbitmq-server-1.7.2/docs/rabbitmq.conf.5.xml 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq.conf.5.xml 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,84 @@ + + + + + RabbitMQ Server + + The RabbitMQ Team <info@rabbitmq.com> + + + + + rabbitmq.conf + 5 + RabbitMQ Server + + + + rabbitmq.conf + default settings for RabbitMQ AMQP server + + + + Description + +/etc/rabbitmq/rabbitmq.conf contains variable settings that override the +defaults built in to the RabbitMQ startup scripts. + + +The file is interpreted by the system shell, and so should consist of +a sequence of shell environment variable definitions. Normal shell +syntax is permitted (since the file is sourced using the shell "." +operator), including line comments starting with "#". + + +In order of preference, the startup scripts get their values from the +environment, from /etc/rabbitmq/rabbitmq.conf and finally from the +built-in default values. For example, for the RABBITMQ_NODENAME +setting, + + + RABBITMQ_NODENAME + + +from the environment is checked first. If it is absent or equal to the +empty string, then + + + NODENAME + + +from /etc/rabbitmq/rabbitmq.conf is checked. If it is also absent +or set equal to the empty string then the default value from the +startup script is used. + + +The variable names in /etc/rabbitmq/rabbitmq.conf are always equal to the +environment variable names, with the RABBITMQ_ prefix removed: +RABBITMQ_NODE_PORT from the environment becomes NODE_PORT in the +/etc/rabbitmq/rabbitmq.conf file, etc. + + For example: + +# I am a complete /etc/rabbitmq/rabbitmq.conf file. +# Comment lines start with a hash character. +# This is a /bin/sh script file - use ordinary envt var syntax +NODENAME=hare + + + This is an example of a complete + /etc/rabbitmq/rabbitmq.conf file that overrides the default Erlang + node name from "rabbit" to "hare". + + + + + + See also + + rabbitmq-multi1 + rabbitmq-server1 + rabbitmqctl1 + + + diff -Nru rabbitmq-server-1.7.2/docs/rabbitmqctl.1.pod rabbitmq-server-2.3.1/docs/rabbitmqctl.1.pod --- rabbitmq-server-1.7.2/docs/rabbitmqctl.1.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmqctl.1.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,536 +0,0 @@ -=head1 NAME - -rabbitmqctl - command line tool for managing a RabbitMQ broker - -=head1 SYNOPSIS - -rabbitmqctl [-n I] I<> [command options] - -=head1 DESCRIPTION - -RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - -rabbitmqctl is a command line tool for managing a RabbitMQ broker. -It performs all actions by connecting to one of the broker's nodes. - - -=head1 OPTIONS - -=over - -=item B<-n> I - -Default node is C, where server is the local host. On -a host named C, the node name of the RabbitMQ -Erlang node will usually be rabbit@server (unless RABBITMQ_NODENAME -has been set to some non-default value at broker startup time). The -output of hostname -s is usually the correct suffix to use after the -"@" sign. See rabbitmq-server(1) for details of configuring the -RabbitMQ broker. - -=item B<-q> - -Quiet output mode is selected with the B<-q> flag. Informational -messages are suppressed when quiet mode is in effect. 
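For instance (an illustrative invocation; list_queues and its columns are described under COMMANDS below), quiet mode is convenient when the output is consumed by scripts:

    rabbitmqctl -q list_queues name messages

Only the tab-separated result rows are printed, with the informational messages suppressed.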
- -=back - -=head1 COMMANDS - -=head2 APPLICATION AND CLUSTER MANAGEMENT - -=over - -=item stop - -Stop the Erlang node on which RabbitMQ broker is running. - -=item stop_app - -Stop the RabbitMQ application, leaving the Erlang node running. This -command is typically run prior to performing other management actions -that require the RabbitMQ application to be stopped, e.g. I. - -=item start_app - -Start the RabbitMQ application. This command is typically run prior -to performing other management actions that require the RabbitMQ -application to be stopped, e.g. I. - -=item status - -Display various information about the RabbitMQ broker, such as whether -the RabbitMQ application on the current node, its version number, what -nodes are part of the broker, which of these are running. - -=item reset - -Return a RabbitMQ node to its virgin state. Removes the node from any -cluster it belongs to, removes all data from the management database, -such as configured users, vhosts and deletes all persistent messages. - -=item force_reset - -The same as I command, but resets the node unconditionally, -regardless of the current management database state and cluster -configuration. It should only be used as a last resort if the -database or cluster configuration has been corrupted. - -=item rotate_logs [suffix] - -Instruct the RabbitMQ node to rotate the log files. The RabbitMQ -broker will attempt to append the current contents of the log file to -the file with the name composed of the original name and the -suffix. It will create a new file if such a file does not already -exist. When no I is specified, the empty log file is simply -created at the original location; no rotation takes place. When an -error occurs while appending the contents of the old log file, the -operation behaves in the same way as if no I was specified. -This command might be helpful when you are e.g. writing your own -logrotate script and you do not want to restart the RabbitMQ node. - -=item cluster I ... - -Instruct the node to become member of a cluster with the specified -nodes determined by I option(s). See -L for more information about -clustering. - -=item close_connection I I - -Instruct the broker to close the connection associated with the Erlang -process id I (see also the I -command), passing the I string to the connected client as -part of the AMQP connection shutdown protocol. - -=back - -=head2 USER MANAGEMENT - -=over - -=item add_user I I - -Create a user named I with (initial) password I. - -=item delete_user I - -Delete the user named I. - -=item change_password I I - -Change the password for the user named I to I. - -=item list_users - -List all users, one per line. - -=back - -=head2 ACCESS CONTROL - -=over - -=item add_vhost I - -Create a new virtual host called I. - -=item delete_vhost I - -Delete a virtual host I. This command deletes also all its -exchanges, queues and user mappings. - -=item list_vhosts - -List all virtual hosts, one per line. - -=item set_permissions [-p I] I I I I - -Set the permissions for the user named I in the virtual host -I, granting I, I and I access to -resources with names matching the first, second and third I, -respectively. - -=item clear_permissions [-p I] I - -Remove the permissions for the user named I in the virtual -host I. - -=item list_permissions [-p I] - -List all the users and their permissions in the virtual host -I. Each output line contains the username and their -I, I and I access regexps, separated by tab -characters. 
- -=item list_user_permissions I - -List the permissions of the user named I across all virtual -hosts. - -=back - -=head2 SERVER STATUS - -=over - -=item list_queues [-p I] [I ...] - -List queue information by virtual host. Each line printed -describes a queue, with the requested I values -separated by tab characters. If no Is are -specified then I and I are assumed. - -=back - -=head3 Queue information items - -=over - -=item name - -name of the queue - -=item durable - -whether the queue survives server restarts - -=item auto_delete - -whether the queue will be deleted when no longer used - -=item arguments - -queue arguments - -=item pid - -id of the Erlang process associated with the queue - -=item owner_pid - -id of the Erlang process representing the connection which is the -exclusive owner of the queue, or empty if the queue is non-exclusive - -=item exclusive_consumer_pid - -id of the Erlang process representing the channel of the exclusive -consumer subscribed to this queue, or empty if there is no exclusive -consumer - -=item exclusive_consumer_tag - -consumer tag of the exclusive consumer subscribed to this queue, or -empty if there is no exclusive consumer - -=item messages_ready - -number of messages ready to be delivered to clients - -=item messages_unacknowledged - -number of messages delivered to clients but not yet acknowledged - -=item messages_uncommitted - -number of messages published in as yet uncommitted transactions - -=item messages - -sum of ready, unacknowledged and uncommitted messages - -=item acks_uncommitted - -number of acknowledgements received in as yet uncommitted transactions - -=item consumers - -number of consumers - -=item transactions - -number of transactions - -=item memory - -bytes of memory consumed by the Erlang process for the queue, -including stack, heap and internal structures - -=back - -=over - -=item list_exchanges [-p I] [I ...] - -List queue information by virtual host. Each line printed describes an -exchange, with the requested I values separated by -tab characters. If no Is are specified then I -and I are assumed. - -=back - -=head3 Exchange information items - -=over - -=item name - -name of the exchange - -=item type - -exchange type (B, B, B, or B) - -=item durable - -whether the exchange survives server restarts - -=item auto_delete - -whether the exchange is deleted when no longer used - -=item arguments - -exchange arguments - -=back - -=over - -=item list_bindings [-p I] - -List bindings by virtual host. Each line printed describes a binding, -with the exchange name, queue name, routing key and arguments, -separated by tab characters. - -=item list_connections [I ...] - -List current AMQP connections. Each line printed describes a -connection, with the requested I values separated -by tab characters. If no Is are specified then -I, I, I and I are assumed. 
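An illustrative invocation (the column names are taken from the connection information items listed below):

    rabbitmqctl list_connections user peer_address peer_port state

prints one tab-separated line per connection showing who is connected, from where, and the state of the connection.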
- -=back - -=head3 Connection information items - -=over - -=item pid - -id of the Erlang process associated with the connection - -=item address - -server IP number - -=item port - -server port - -=item peer_address - -peer address - -=item peer_port - -peer port - -=item state - -connection state (B, B, B, B, -B, B, B) - -=item channels - -number of channels using the connection - -=item user - -username associated with the connection - -=item vhost - -virtual host - -=item timeout - -connection timeout - -=item frame_max - -maximum frame size (bytes) - -=item client_properties - -informational properties transmitted by the client during connection -establishment - -=item recv_oct - -octets received - -=item recv_cnt - -packets received - -=item send_oct - -octets sent - -=item send_cnt - -packets sent - -=item send_pend - -send queue size - -=back - -=over - -=item list_channels [I ...] - -List channel information. Each line printed describes a channel, with -the requested I values separated by tab characters. -If no Is are specified then I, I, -I, I, and I -are assumed. - -The list includes channels which are part of ordinary AMQP connections -(as listed by list_connections) and channels created by various -plug-ins and other extensions. - -=back - -=head3 Channel information items - -=over - -=item pid - -id of the Erlang process associated with the channel - -=item connection - -id of the Erlang process associated with the connection to which the -channel belongs - -=item number - -the number of the channel, which uniquely identifies it within a -connection - -=item user - -username associated with the channel - -=item vhost - -virtual host in which the channel operates - -=item transactional - -true if the channel is in transactional mode, false otherwise - -=item consumer_count - -number of logical AMQP consumers retrieving messages via the channel - -=item messages_unacknowledged - -number of messages delivered via this channel but not yet acknowledged - -=item acks_uncommitted - -number of acknowledgements received in an as yet uncommitted -transaction - -=item prefetch_count - -QoS prefetch count limit in force, 0 if unlimited - -=back - -=item list_consumers - -List consumers, i.e. subscriptions to a queue's message stream. Each -line printed shows, separated by tab characters, the name of the queue -subscribed to, the id of the channel process via which the -subscription was created and is managed, the consumer tag which -uniquely identifies the subscription within a channel, and a boolean -indicating whether acknowledgements are expected for messages -delivered to this consumer. - -=back - -The list_queues, list_exchanges, list_bindings and list_consumers -commands accept an optional virtual host parameter for which to -display results, defaulting to I<"/">. The default can be overridden -with the B<-p> flag. - -=head1 OUTPUT ESCAPING - -Various items that may appear in the output of rabbitmqctl can contain -arbitrary octets. If a octet corresponds to a non-printing ASCII -character (values 0 to 31, and 127), it will be escaped in the output, -using a sequence consisting of a backslash character followed by three -octal digits giving the octet's value (i.e., as used in string -literals in the C programming language). An octet corresponding to -the backslash character (i.e. with value 92) will be escaped using a -sequence of two backslash characters. Octets with a value of 128 or -above are not escaped, in order to preserve strings encoded with -UTF-8. 
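As a worked illustration with hypothetical names: a queue whose name contains a tab character (octet 9, octal 011) would be printed as

    my\011queue

and a name containing a single literal backslash would be printed with the backslash doubled, as in my\\queue; multi-byte UTF-8 sequences pass through unchanged since their bytes are 128 or above.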
- -The items to which this escaping scheme applies are: - -=over - -=item * -Usernames - -=item * -Virtual host names - -=item * -Queue names - -=item * -Exchange names - -=item * -Regular expressions used for access control - -=back - -=head1 EXAMPLES - -Create a user named foo with (initial) password bar at the Erlang node -rabbit@test: - - rabbitmqctl -n rabbit@test add_user foo bar - -Grant user named foo access to the virtual host called test at the -default Erlang node: - - rabbitmqctl map_user_vhost foo test - -Append the current logs' content to the files with ".1" suffix and reopen -them: - - rabbitmqctl rotate_logs .1 - -=head1 SEE ALSO - -rabbitmq.conf(5), rabbitmq-multi(1), rabbitmq-server(1) - -=head1 AUTHOR - -The RabbitMQ Team - -=head1 REFERENCES - -RabbitMQ Web Site: L diff -Nru rabbitmq-server-1.7.2/docs/rabbitmqctl.1.xml rabbitmq-server-2.3.1/docs/rabbitmqctl.1.xml --- rabbitmq-server-1.7.2/docs/rabbitmqctl.1.xml 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmqctl.1.xml 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,1247 @@ + + + + + + + RabbitMQ Server + + The RabbitMQ Team <info@rabbitmq.com> + + + + + rabbitmqctl + 1 + RabbitMQ Service + + + + rabbitmqctl + command line tool for managing a RabbitMQ broker + + + + + rabbitmqctl + -n node + -q + command + command options + + + + + Description + + RabbitMQ is an implementation of AMQP, the emerging standard for high + performance enterprise messaging. The RabbitMQ server is a robust and + scalable implementation of an AMQP broker. + + + rabbitmqctl is a command line tool for managing a + RabbitMQ broker. It performs all actions by connecting to one of the + broker's nodes. + + + + + Options + + + -n node + + + Default node is "rabbit@server", where server is the local host. On + a host named "server.example.com", the node name of the RabbitMQ + Erlang node will usually be rabbit@server (unless RABBITMQ_NODENAME + has been set to some non-default value at broker startup time). The + output of hostname -s is usually the correct suffix to use after the + "@" sign. See rabbitmq-server(1) for details of configuring the + RabbitMQ broker. + + + + + -q + + + Quiet output mode is selected with the "-q" flag. Informational + messages are suppressed when quiet mode is in effect. + + + + + + + + Commands + + + Application and Cluster Management + + + + stop + + + Stops the Erlang node on which RabbitMQ is running. To + restart the node follow the instructions for Running + the Server in the installation + guide. + + For example: + rabbitmqctl stop + + This command instructs the RabbitMQ node to terminate. + + + + + + stop_app + + + Stops the RabbitMQ application, leaving the Erlang node + running. + + + This command is typically run prior to performing other + management actions that require the RabbitMQ application + to be stopped, e.g. reset. + + For example: + rabbitmqctl stop_app + + This command instructs the RabbitMQ node to stop the + RabbitMQ application. + + + + + + start_app + + + Starts the RabbitMQ application. + + + This command is typically run after performing other + management actions that required the RabbitMQ application + to be stopped, e.g. reset. + + For example: + rabbitmqctl start_app + + This command instructs the RabbitMQ node to start the + RabbitMQ application. 
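Taken together, a typical maintenance sequence (illustrative only; the reset command is documented below) would be:

    rabbitmqctl stop_app
    rabbitmqctl reset
    rabbitmqctl start_app

that is: stop the RabbitMQ application while leaving the Erlang node running, reset the node, then start the application again.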
+ + + + + + status + + + Displays various information about the RabbitMQ broker, + such as whether the RabbitMQ application on the current + node, its version number, what nodes are part of the + broker, which of these are running. + + For example: + rabbitmqctl status + + This command displays information about the RabbitMQ + broker. + + + + + + reset + + + Return a RabbitMQ node to its virgin state. + + + Removes the node from any cluster it belongs to, removes + all data from the management database, such as configured + users and vhosts, and deletes all persistent + messages. + + + For reset and force_reset to + succeed the RabbitMQ application must have been stopped, + e.g. with stop_app. + + For example: + rabbitmqctl reset + + This command resets the RabbitMQ node. + + + + + + force_reset + + + Forcefully return a RabbitMQ node to its virgin state. + + + The force_reset command differs from + reset in that it resets the node + unconditionally, regardless of the current management + database state and cluster configuration. It should only + be used as a last resort if the database or cluster + configuration has been corrupted. + + + For reset and force_reset to + succeed the RabbitMQ application must have been stopped, + e.g. with stop_app. + + For example: + rabbitmqctl force_reset + + This command resets the RabbitMQ node. + + + + + + rotate_logs suffix + + + Instruct the RabbitMQ node to rotate the log files. + + + The RabbitMQ broker will attempt to append the current contents + of the log file to the file with name composed of the original + name and the suffix. + It will create a new file if such a file does not already exist. + When no is specified, the empty log file is + simply created at the original location; no rotation takes place. + + + When an error occurs while appending the contents of the old log + file, the operation behaves in the same way as if no was + specified. + + + This command might be helpful when you are e.g. writing your + own logrotate script and you do not want to restart the RabbitMQ + node. + + For example: + rabbitmqctl rotate_logs .1 + + This command instructs the RabbitMQ node to append the current content + of the log files to the files with names consisting of the original logs' + names and ".1" suffix, e.g. rabbit.log.1. Finally, the old log files are reopened. + + + + + + + + Cluster management + + + + cluster clusternode ... + + + + clusternode + Subset of the nodes of the cluster to which this node should be connected. + + + + Instruct the node to become member of a cluster with the + specified nodes. To cluster with currently offline nodes, + use force_cluster. + + + Cluster nodes can be of two types: disk or ram. Disk nodes + replicate data in ram and on disk, thus providing + redundancy in the event of node failure and recovery from + global events such as power failure across all nodes. Ram + nodes replicate data in ram only and are mainly used for + scalability. A cluster must always have at least one disk node. + + + If the current node is to become a disk node it needs to + appear in the cluster node list. Otherwise it becomes a + ram node. If the node list is empty or only contains the + current node then the node becomes a standalone, + i.e. non-clustered, (disk) node. + + + After executing the cluster command, whenever + the RabbitMQ application is started on the current node it + will attempt to connect to the specified nodes, thus + becoming an active node in the cluster comprising those + nodes (and possibly others). 
+ + + The list of nodes does not have to contain all the + cluster's nodes; a subset is sufficient. Also, clustering + generally succeeds as long as at least one of the + specified nodes is active. Hence adjustments to the list + are only necessary if the cluster configuration is to be + altered radically. + + + For this command to succeed the RabbitMQ application must + have been stopped, e.g. with stop_app. Furthermore, + turning a standalone node into a clustered node requires + the node be reset first, + in order to avoid accidental destruction of data with the + cluster command. + + + For more details see the clustering guide. + + For example: + rabbitmqctl cluster rabbit@tanto hare@elena + + This command instructs the RabbitMQ node to join the + cluster with nodes rabbit@tanto and + hare@elena. If the node is one of these then + it becomes a disk node, otherwise a ram node. + + + + + force_cluster clusternode ... + + + + clusternode + Subset of the nodes of the cluster to which this node should be connected. + + + + Instruct the node to become member of a cluster with the + specified nodes. This will succeed even if the specified nodes + are offline. For a more detailed description, see + cluster. + + + Note that this variant of the cluster command just + ignores the current status of the specified nodes. + Clustering may still fail for a variety of other + reasons. + + + + + + + + Closing individual connections + + + + close_connection connectionpid explanation + + + + connectionpid + Id of the Erlang process associated with the connection to close. + + + explanation + Explanation string. + + + + Instruct the broker to close the connection associated + with the Erlang process id (see also the + list_connections + command), passing the string to the + connected client as part of the AMQP connection shutdown + protocol. + + For example: + rabbitmqctl close_connection "<rabbit@tanto.4262.0>" "go away" + + This command instructs the RabbitMQ broker to close the + connection associated with the Erlang process + id <rabbit@tanto.4262.0>, passing the + explanation go away to the connected client. + + + + + + + + User management + + Note that rabbitmqctl manages the RabbitMQ + internal user database. Users from any alternative + authentication backend will not be visible + to rabbitmqctl. + + + + add_user username password + + + + username + The name of the user to create. + + + password + The password the created user will use to log in to the broker. + + + For example: + rabbitmqctl add_user tonyg changeit + + This command instructs the RabbitMQ broker to create a + (non-administrative) user named tonyg with + (initial) password + changeit. + + + + + + delete_user username + + + + username + The name of the user to delete. + + + For example: + rabbitmqctl delete_user tonyg + + This command instructs the RabbitMQ broker to delete the + user named tonyg. + + + + + + change_password username newpassword + + + + username + The name of the user whose password is to be changed. + + + newpassword + The new password for the user. + + + For example: + rabbitmqctl change_password tonyg newpass + + This command instructs the RabbitMQ broker to change the + password for the user named tonyg to + newpass. + + + + + + clear_password username + + + + username + The name of the user whose password is to be cleared. + + + For example: + rabbitmqctl clear_password tonyg + + This command instructs the RabbitMQ broker to clear the + password for the user named + tonyg. 
This user now cannot log in with a password (but may be able to through e.g. SASL EXTERNAL if configured). + + + + + + set_admin username + + + + username + The name of the user whose administrative + status is to be set. + + + For example: + rabbitmqctl set_admin tonyg + + This command instructs the RabbitMQ broker to ensure the user + named tonyg is an administrator. This has no + effect when the user logs in via AMQP, but can be used to permit + the user to manage users, virtual hosts and permissions when the + user logs in via some other means (for example with the + management plugin). + + + + + + clear_admin username + + + + username + The name of the user whose administrative + status is to be cleared. + + + For example: + rabbitmqctl clear_admin tonyg + + This command instructs the RabbitMQ broker to ensure the user + named tonyg is not an administrator. + + + + + + list_users + + Lists users + For example: + rabbitmqctl list_users + + This command instructs the RabbitMQ broker to list all + users. Each result row will contain the user name and + the administrator status of the user, in that order. + + + + + + + + Access control + + Note that rabbitmqctl manages the RabbitMQ + internal user database. Permissions for users from any + alternative authorisation backend will not be visible + to rabbitmqctl. + + + + add_vhost vhostpath + + + + vhostpath + The name of the virtual host entry to create. + + + + Creates a virtual host. + + For example: + rabbitmqctl add_vhost test + + This command instructs the RabbitMQ broker to create a new + virtual host called test. + + + + + + delete_vhost vhostpath + + + + vhostpath + The name of the virtual host entry to delete. + + + + Deletes a virtual host. + + + Deleting a virtual host deletes all its exchanges, + queues, user mappings and associated permissions. + + For example: + rabbitmqctl delete_vhost test + + This command instructs the RabbitMQ broker to delete the + virtual host called test. + + + + + + list_vhosts + + + Lists virtual hosts. + + For example: + rabbitmqctl list_vhosts + + This command instructs the RabbitMQ broker to list all + virtual hosts. + + + + + + set_permissions -p vhostpath user conf write read + + + + vhostpath + The name of the virtual host to which to grant the user access, defaulting to /. + + + user + The name of the user to grant access to the specified virtual host. + + + conf + A regular expression matching resource names for which the user is granted configure permissions. + + + write + A regular expression matching resource names for which the user is granted write permissions. + + + read + A regular expression matching resource names for which the user is granted read permissions. + + + + Sets user permissions. + + For example: + rabbitmqctl set_permissions -p /myvhost tonyg "^tonyg-.*" ".*" ".*" + + This command instructs the RabbitMQ broker to grant the + user named tonyg access to the virtual host + called /myvhost, with configure permissions + on all resources whose names starts with "tonyg-", and + write and read permissions on all resources. + + + + + + clear_permissions -p vhostpath username + + + + vhostpath + The name of the virtual host to which to deny the user access, defaulting to /. + + + username + The name of the user to deny access to the specified virtual host. + + + + Sets user permissions. + + For example: + rabbitmqctl clear_permissions -p /myvhost tonyg + + This command instructs the RabbitMQ broker to deny the + user named tonyg access to the virtual host + called /myvhost. 
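Putting the user and access control commands together, a minimal provisioning sequence (illustrative names only) might look like:

    rabbitmqctl add_vhost /myvhost
    rabbitmqctl add_user tonyg changeit
    rabbitmqctl set_permissions -p /myvhost tonyg ".*" ".*" ".*"

which creates a virtual host and a user, then grants that user configure, write and read permissions on every resource in the virtual host.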
+ + + + + + list_permissions -p vhostpath + + + + vhostpath + The name of the virtual host for which to list the users that have been granted access to it, and their permissions. Defaults to /. + + + + Lists permissions in a virtual host. + + For example: + rabbitmqctl list_permissions -p /myvhost + + This command instructs the RabbitMQ broker to list all + the users which have been granted access to the virtual + host called /myvhost, and the + permissions they have for operations on resources in + that virtual host. Note that an empty string means no + permissions granted. + + + + + + list_user_permissions -p vhostpath username + + + + username + The name of the user for which to list the permissions. + + + + Lists user permissions. + + For example: + rabbitmqctl list_user_permissions tonyg + + This command instructs the RabbitMQ broker to list all the + virtual hosts to which the user named tonyg + has been granted access, and the permissions the user has + for operations on resources in these virtual hosts. + + + + + + + + Server Status + + The server status queries interrogate the server and return a list of + results with tab-delimited columns. Some queries (list_queues, + list_exchanges, list_bindings, and + list_consumers) accept an + optional vhost parameter. This parameter, if present, must be + specified immediately after the query. + + + The list_queues, list_exchanges and list_bindings commands accept an + optional virtual host parameter for which to display results. The + default value is "/". + + + + + list_queues -p vhostpath queueinfoitem ... + + + Returns queue details. Queue details of the / virtual host + are returned if the "-p" flag is absent. The "-p" flag can be used to + override this default. + + + The queueinfoitem parameter is used to indicate which queue + information items to include in the results. The column order in the + results will match the order of the parameters. + queueinfoitem can take any value from the list + that follows: + + + + name + The name of the queue with non-ASCII characters escaped as in C. + + + durable + Whether or not the queue survives server restarts. + + + auto_delete + Whether the queue will be deleted automatically when no longer used. + + + arguments + Queue arguments. + + + pid + Id of the Erlang process associated with the queue. + + + owner_pid + Id of the Erlang process representing the connection + which is the exclusive owner of the queue. Empty if the + queue is non-exclusive. + + + exclusive_consumer_pid + Id of the Erlang process representing the channel of the + exclusive consumer subscribed to this queue. Empty if + there is no exclusive consumer. + + + exclusive_consumer_tag + Consumer tag of the exclusive consumer subscribed to + this queue. Empty if there is no exclusive consumer. + + + messages_ready + Number of messages ready to be delivered to clients. + + + messages_unacknowledged + Number of messages delivered to clients but not yet acknowledged. + + + messages + Sum of ready and unacknowledged messages + (queue depth). + + + consumers + Number of consumers. + + + memory + Bytes of memory consumed by the Erlang process associated with the + queue, including stack, heap and internal structures. + + + + If no queueinfoitems are specified then queue name and depth are + displayed. + + + For example: + + rabbitmqctl list_queues -p /myvhost messages consumers + + This command displays the depth and number of consumers for each + queue of the virtual host named /myvhost. 
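Because the listing commands emit tab-delimited columns, their output is straightforward to post-process with standard tools; for example (a hypothetical one-liner, threshold chosen arbitrarily):

    rabbitmqctl -q list_queues name messages | awk -F'\t' '$2 > 1000 { print $1 }'

prints the names of all queues in the default virtual host currently holding more than 1000 messages.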
+ + + + + + list_exchanges -p vhostpath exchangeinfoitem ... + + + Returns exchange details. Exchange details of the / virtual host + are returned if the "-p" flag is absent. The "-p" flag can be used to + override this default. + + + The exchangeinfoitem parameter is used to indicate which + exchange information items to include in the results. The column order in the + results will match the order of the parameters. + exchangeinfoitem can take any value from the list + that follows: + + + + name + The name of the exchange with non-ASCII characters escaped as in C. + + + type + The exchange type (one of [direct, + topic, headers, + fanout]). + + + durable + Whether or not the exchange survives server restarts. + + + auto_delete + Whether the exchange will be deleted automatically when no longer used. + + + internal + Whether the exchange is internal, i.e. cannot be directly published to by a client. + + + arguments + Exchange arguments. + + + + If no exchangeinfoitems are specified then + exchange name and type are displayed. + + + For example: + + rabbitmqctl list_exchanges -p /myvhost name type + + This command displays the name and type for each + exchange of the virtual host named /myvhost. + + + + + + list_bindings -p vhostpath bindinginfoitem ... + + + Returns binding details. By default the bindings for + the / virtual host are returned. The + "-p" flag can be used to override this default. + + + The bindinginfoitem parameter is used + to indicate which binding information items to include + in the results. The column order in the results will + match the order of the parameters. + bindinginfoitem can take any value + from the list that follows: + + + + source_name + The name of the source of messages to + which the binding is attached. With non-ASCII + characters escaped as in C. + + + source_kind + The kind of the source of messages to + which the binding is attached. Currently always + queue. With non-ASCII characters escaped as in + C. + + + destination_name + The name of the destination of + messages to which the binding is attached. With + non-ASCII characters escaped as in + C. + + + destination_kind + The kind of the destination of + messages to which the binding is attached. With + non-ASCII characters escaped as in + C. + + + routing_key + The binding's routing key, with + non-ASCII characters escaped as in C. + + + arguments + The binding's arguments. + + + + If no bindinginfoitems are specified then + all above items are displayed. + + + For example: + + rabbitmqctl list_bindings -p /myvhost exchange_name queue_name + + This command displays the exchange name and queue name + of the bindings in the virtual host + named /myvhost. + + + + + + list_connections connectioninfoitem ... + + + Returns TCP/IP connection statistics. + + + The connectioninfoitem parameter is used to indicate + which connection information items to include in the results. The + column order in the results will match the order of the parameters. + connectioninfoitem can take any value from the list + that follows: + + + + + pid + Id of the Erlang process associated with the connection. + + + address + Server IP address. + + + port + Server port. + + + peer_address + Peer address. + + + peer_port + Peer port. + + + ssl + Boolean indicating whether the + connection is secured with SSL. + + + ssl_protocol + SSL protocol + (e.g. tlsv1) + + + ssl_key_exchange + SSL key exchange algorithm + (e.g. rsa) + + + ssl_cipher + SSL cipher algorithm + (e.g. aes_256_cbc) + + + ssl_hash + SSL hash function + (e.g. 
sha) + + + peer_cert_subject + The subject of the peer's SSL + certificate, in RFC4514 form. + + + peer_cert_issuer + The issuer of the peer's SSL + certificate, in RFC4514 form. + + + peer_cert_validity + The period for which the peer's SSL + certificate is valid. + + + state + Connection state (one of [starting, tuning, + opening, running, closing, closed]). + + + channels + Number of channels using the connection. + + + protocol + Version of the AMQP protocol in use (currently one of {0,9,1} or {0,8,0}). Note that if a client requests an AMQP 0-9 connection, we treat it as AMQP 0-9-1. + + + auth_mechanism + SASL authentication mechanism used, such as PLAIN. + + + user + Username associated with the connection. + + + vhost + Virtual host name with non-ASCII characters escaped as in C. + + + timeout + Connection timeout. + + + frame_max + Maximum frame size (bytes). + + + client_properties + Informational properties transmitted by the client + during connection establishment. + + + recv_oct + Octets received. + + + recv_cnt + Packets received. + + + send_oct + Octets send. + + + send_cnt + Packets sent. + + + send_pend + Send queue size. + + + + If no connectioninfoitems are specified then user, peer + address, peer port and connection state are displayed. + + + + For example: + + rabbitmqctl list_connections send_pend port + + This command displays the send queue size and server port for each + connection. + + + + + + list_channels channelinfoitem ... + + + Returns information on all current channels, the logical + containers executing most AMQP commands. This includes + channels that are part of ordinary AMQP connections, and + channels created by various plug-ins and other extensions. + + + The channelinfoitem parameter is used to + indicate which channel information items to include in the + results. The column order in the results will match the + order of the parameters. + channelinfoitem can take any value from the list + that follows: + + + + + pid + Id of the Erlang process associated with the connection. + + + connection + Id of the Erlang process associated with the connection + to which the channel belongs. + + + number + The number of the channel, which uniquely identifies it within + a connection. + + + user + Username associated with the channel. + + + vhost + Virtual host in which the channel operates. + + + transactional + True if the channel is in transactional mode, false otherwise. + + + consumer_count + Number of logical AMQP consumers retrieving messages via + the channel. + + + messages_unacknowledged + Number of messages delivered via this channel but not + yet acknowledged. + + + acks_uncommitted + Number of acknowledgements received in an as yet + uncommitted transaction. + + + prefetch_count + QoS prefetch count limit in force, 0 if unlimited. + + + client_flow_blocked + True if the client issued a + channel.flow{active=false} + command, blocking the server from delivering + messages to the channel's consumers. + + + + confirm + True if the channel is in confirm mode, false otherwise. + + + messages_unconfirmed + Number of published messages not yet + confirmed. On channels not in confirm mode, this + remains 0. + + + + If no channelinfoitems are specified then pid, + user, transactional, consumer_count, and + messages_unacknowledged are assumed. + + + + For example: + + rabbitmqctl list_channels connection messages_unacknowledged + + This command displays the connection process and count + of unacknowledged messages for each channel. 
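For example (an illustrative column selection using items from the list above), channels on which the client has issued channel.flow{active=false} can be spotted with:

    rabbitmqctl -q list_channels connection number client_flow_blocked

which prints the owning connection, the channel number and a true/false flag for each channel.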
+ + + + + + list_consumers + + + List consumers, i.e. subscriptions to a queue's message + stream. Each line printed shows, separated by tab + characters, the name of the queue subscribed to, the id of + the channel process via which the subscription was created + and is managed, the consumer tag which uniquely identifies + the subscription within a channel, and a boolean + indicating whether acknowledgements are expected for + messages delivered to this consumer. + + + The output format for "list_consumers" is a list of rows containing, + in order, the queue name, channel process id, consumer tag, and a + boolean indicating whether acknowledgements are expected from the + consumer. + + + + + + + + diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-deactivate-plugins.1.pod rabbitmq-server-2.3.1/docs/rabbitmq-deactivate-plugins.1.pod --- rabbitmq-server-1.7.2/docs/rabbitmq-deactivate-plugins.1.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-deactivate-plugins.1.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -=head1 NAME - -rabbitmq-deactivate-plugins - command line tool for deactivating plugins -in a RabbitMQ broker - -=head1 SYNOPSIS - -rabbitmq-deactivate-plugins - -=head1 DESCRIPTION - -RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - -rabbitmq-deactivate-plugins is a command line tool for deactivating -plugins installed into the broker. - -=head1 EXAMPLES - -To deactivate all of the installed plugins in the current RabbitMQ install, -execute: - - rabbitmq-deactivate-plugins - -=head1 SEE ALSO - -L, L, L, -L, L - -=head1 AUTHOR - -The RabbitMQ Team - -=head1 REFERENCES - -RabbitMQ Web Site: L diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-multi.1.pod rabbitmq-server-2.3.1/docs/rabbitmq-multi.1.pod --- rabbitmq-server-1.7.2/docs/rabbitmq-multi.1.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-multi.1.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -=head1 NAME - -rabbitmq-multi - start/stop local cluster RabbitMQ nodes - -=head1 SYNOPSIS - -rabbitmq-multi I [command option] - -=head1 DESCRIPTION - -RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - -rabbitmq-multi scripts allows for easy set-up of a cluster on a single -machine. - -See also L for configuration information. - -=head1 COMMANDS - -=over - -=item start_all I - -Start count nodes with unique names, listening on all IP addresses and -on sequential ports starting from 5672. - -=item status - -Print the status of all running RabbitMQ nodes. - -=item stop_all - -Stop all local RabbitMQ nodes, - -=item rotate_logs - -Rotate log files for all local and running RabbitMQ nodes. 
- -=back - -=head1 EXAMPLES - -Start 3 local RabbitMQ nodes with unique, sequential port numbers: - - rabbitmq-multi start_all 3 - -=head1 SEE ALSO - -L, L, L - -=head1 AUTHOR - -The RabbitMQ Team - -=head1 REFERENCES - -RabbitMQ Web Site: L diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-multi.1.xml rabbitmq-server-2.3.1/docs/rabbitmq-multi.1.xml --- rabbitmq-server-1.7.2/docs/rabbitmq-multi.1.xml 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-multi.1.xml 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,100 @@ + + + + + RabbitMQ Server + + The RabbitMQ Team <info@rabbitmq.com> + + + + + rabbitmq-multi + 1 + RabbitMQ Server + + + + rabbitmq-multi + start/stop local cluster RabbitMQ nodes + + + + + rabbitmq-multi + command + command options + + + + + Description + + RabbitMQ is an implementation of AMQP, the emerging standard for high +performance enterprise messaging. The RabbitMQ server is a robust and +scalable implementation of an AMQP broker. + + +rabbitmq-multi scripts allows for easy set-up of a cluster on a single +machine. + + + + + Commands + + + start_all count + + +Start count nodes with unique names, listening on all IP addresses and +on sequential ports starting from 5672. + + For example: + rabbitmq-multi start_all 3 + + Starts 3 local RabbitMQ nodes with unique, sequential port numbers. + + + + + + status + + +Print the status of all running RabbitMQ nodes. + + + + + + stop_all + + +Stop all local RabbitMQ nodes, + + + + + + rotate_logs + + +Rotate log files for all local and running RabbitMQ nodes. + + + + + + + + + + See also + + rabbitmq.conf5 + rabbitmq-server1 + rabbitmqctl1 + + + diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-server.1.pod rabbitmq-server-2.3.1/docs/rabbitmq-server.1.pod --- rabbitmq-server-1.7.2/docs/rabbitmq-server.1.pod 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-server.1.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -=head1 NAME - -rabbitmq-server - start RabbitMQ AMQP server - -=head1 SYNOPSIS - -rabbitmq-server [-detached] - -=head1 DESCRIPTION - -RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - -Running rabbitmq-server in the foreground displays a banner message, -and reports on progress in the startup sequence, concluding with the -message "broker running", indicating that the RabbitMQ broker has been -started successfully. To shut down the server, just terminate the -process or use L. - -=head1 ENVIRONMENT - -=over - -=item B - -Defaults to F. Set this to the directory where -Mnesia database files should be placed. - -=item B - -Defaults to F. Log files generated by the server will -be placed in this directory. - -=item B - -Defaults to rabbit. This can be useful if you want to run more than -one node per machine - B should be unique per -erlang-node-and-machine combination. See clustering on a single -machine guide at -L for details. - -=item B - -Defaults to 0.0.0.0. This can be changed if you only want to bind to -one network interface. - -=item B - -Defaults to 5672. - -=item B - -Defaults to F. If this file is -present it is used by the server to auto-configure a RabbitMQ cluster. -See the clustering guide at L -for details. 
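Because the startup script takes its settings from the environment first, a second node can be started alongside the default one (illustrative values; assumes the default rabbit node is already running on port 5672):

    RABBITMQ_NODENAME=hare RABBITMQ_NODE_PORT=5673 rabbitmq-server -detached

This starts a detached broker named hare listening on port 5673; the -detached flag is described under OPTIONS below.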
- -=back - -=head1 OPTIONS - -=over - -=item B<-detached> - -start the server process in the background - -=back - -=head1 EXAMPLES - -Run RabbitMQ AMQP server in the background: - - rabbitmq-server -detached - -=head1 SEE ALSO - -L, L, L - -=head1 AUTHOR - -The RabbitMQ Team - -=head1 REFERENCES - -RabbitMQ Web Site: L - diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-server.1.xml rabbitmq-server-2.3.1/docs/rabbitmq-server.1.xml --- rabbitmq-server-1.7.2/docs/rabbitmq-server.1.xml 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-server.1.xml 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,132 @@ + + + + + RabbitMQ Server + + The RabbitMQ Team <info@rabbitmq.com> + + + + + rabbitmq-server + 1 + RabbitMQ Server + + + + rabbitmq-server + start RabbitMQ AMQP server + + + + + rabbitmq-server + -detached + + + + + Description + + RabbitMQ is an implementation of AMQP, the emerging standard for high +performance enterprise messaging. The RabbitMQ server is a robust and +scalable implementation of an AMQP broker. + + +Running rabbitmq-server in the foreground displays a banner message, +and reports on progress in the startup sequence, concluding with the +message "broker running", indicating that the RabbitMQ broker has been +started successfully. To shut down the server, just terminate the +process or use rabbitmqctl(1). + + + + + Environment + + + + RABBITMQ_MNESIA_BASE + + +Defaults to /var/lib/rabbitmq/mnesia. Set this to the directory where +Mnesia database files should be placed. + + + + + + RABBITMQ_LOG_BASE + + +Defaults to /var/log/rabbitmq. Log files generated by the server will +be placed in this directory. + + + + + + RABBITMQ_NODENAME + + +Defaults to rabbit. This can be useful if you want to run more than +one node per machine - RABBITMQ_NODENAME should be unique per +erlang-node-and-machine combination. See the +clustering on a single +machine guide for details. + + + + + + RABBITMQ_NODE_IP_ADDRESS + + +By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if +available. Set this if you only want to bind to one network interface +or address family. + + + + + + RABBITMQ_NODE_PORT + + +Defaults to 5672. + + + + + + + + + Options + + + -detached + + + start the server process in the background + + For example: + rabbitmq-server -detached + + Runs RabbitMQ AMQP server in the background. + + + + + + + + See also + + rabbitmq.conf5 + rabbitmq-multi1 + rabbitmqctl1 + + + diff -Nru rabbitmq-server-1.7.2/docs/rabbitmq-service.xml rabbitmq-server-2.3.1/docs/rabbitmq-service.xml --- rabbitmq-server-1.7.2/docs/rabbitmq-service.xml 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/rabbitmq-service.xml 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,217 @@ + + + + + RabbitMQ Server + + The RabbitMQ Team <info@rabbitmq.com> + + + + + rabbitmq-service.bat + RabbitMQ Server + + + + rabbitmq-service.bat + manage RabbitMQ AMQP service + + + + + rabbitmq-service.bat + command + + + + + Description + + RabbitMQ is an implementation of AMQP, the emerging standard for high +performance enterprise messaging. The RabbitMQ server is a robust and +scalable implementation of an AMQP broker. + + +Running rabbitmq-service allows the RabbitMQ broker to be run as a +service on NT/2000/2003/XP/Vista® environments. The RabbitMQ broker +service can be started and stopped using the Windows® services +applet. + + +By default the service will run in the authentication context of the +local system account. 
It is therefore necessary to synchronise Erlang +cookies between the local system account (typically +C:\WINDOWS\.erlang.cookie and the account that will be used to +run rabbitmqctl. + + + + + Commands + + + + help + + +Display usage information. + + + + + + install + + +Install the service. The service will not be started. +Subsequent invocations will update the service parameters if +relevant environment variables were modified. + + + + + + remove + + +Remove the service. If the service is running then it will +automatically be stopped before being removed. No files will be +deleted as a consequence and rabbitmq-server will remain operable. + + + + + + start + + +Start the service. The service must have been correctly installed +beforehand. + + + + + + stop + + +Stop the service. The service must be running for this command to +have any effect. + + + + + + disable + + +Disable the service. This is the equivalent of setting the startup +type to Disabled using the service control panel. + + + + + + enable + + +Enable the service. This is the equivalent of setting the startup +type to Automatic using the service control panel. + + + + + + + + Environment + + + + RABBITMQ_SERVICENAME + + +Defaults to RabbitMQ. + + + + + + RABBITMQ_BASE + + +Defaults to the application data directory of the current user. +This is the location of log and database directories. + + + + + + + RABBITMQ_NODENAME + + +Defaults to rabbit. This can be useful if you want to run more than +one node per machine - RABBITMQ_NODENAME should be unique per +erlang-node-and-machine combination. See the +clustering on a single +machine guide for details. + + + + + + RABBITMQ_NODE_IP_ADDRESS + + +By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if +available. Set this if you only want to bind to one network interface +or address family. + + + + + + RABBITMQ_NODE_PORT + + +Defaults to 5672. + + + + + + ERLANG_SERVICE_MANAGER_PATH + + +Defaults to C:\Program Files\erl5.5.5\erts-5.5.5\bin +(or C:\Program Files (x86)\erl5.5.5\erts-5.5.5\bin for 64-bit +environments). This is the installation location of the Erlang service +manager. + + + + + + RABBITMQ_CONSOLE_LOG + + +Set this varable to new or reuse to have the console +output from the server redirected to a file named SERVICENAME.debug +in the application data directory of the user that installed the service. +Under Vista this will be C:\Users\AppData\username\SERVICENAME. +Under previous versions of Windows this will be +C:\Documents and Settings\username\Application Data\SERVICENAME. +If RABBITMQ_CONSOLE_LOG is set to new then a new file will be +created each time the service starts. If RABBITMQ_CONSOLE_LOG is +set to reuse then the file will be overwritten each time the +service starts. The default behaviour when RABBITMQ_CONSOLE_LOG is +not set or set to a value other than new or reuse is to discard +the server output. + + + + + + diff -Nru rabbitmq-server-1.7.2/docs/remove-namespaces.xsl rabbitmq-server-2.3.1/docs/remove-namespaces.xsl --- rabbitmq-server-1.7.2/docs/remove-namespaces.xsl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/remove-namespaces.xsl 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + diff -Nru rabbitmq-server-1.7.2/docs/usage.xsl rabbitmq-server-2.3.1/docs/usage.xsl --- rabbitmq-server-1.7.2/docs/usage.xsl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/docs/usage.xsl 2011-02-03 12:47:36.000000000 +0000 @@ -0,0 +1,78 @@ + + + + + + + + + + +%% Generated, do not edit! 
+-module(). +-export([usage/0]). +usage() -> %QUOTE%Usage: + + + + + + + + + + + + Options: + + + + + , + + + + + + + + + + + + + Commands: + + + + + + + + + +%QUOTE%. + + + +<> must be a member of the list [, ]. + + + + + + + + + +[] +<> + + diff -Nru rabbitmq-server-1.7.2/ebin/rabbit_app.in rabbitmq-server-2.3.1/ebin/rabbit_app.in --- rabbitmq-server-1.7.2/ebin/rabbit_app.in 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/ebin/rabbit_app.in 2011-02-03 12:47:36.000000000 +0000 @@ -1,24 +1,37 @@ {application, rabbit, %% -*- erlang -*- [{description, "RabbitMQ"}, {id, "RabbitMQ"}, - {vsn, "1.7.2"}, + {vsn, "2.3.1"}, {modules, []}, {registered, [rabbit_amqqueue_sup, rabbit_log, rabbit_node_monitor, - rabbit_persister, rabbit_router, rabbit_sup, - rabbit_tcp_client_sup]}, + rabbit_tcp_client_sup, + rabbit_direct_client_sup]}, {applications, [kernel, stdlib, sasl, mnesia, os_mon]}, -%% we also depend on ssl but it shouldn't be in here as we don't -%% actually want to start it +%% we also depend on crypto, public_key and ssl but they shouldn't be +%% in here as we don't actually want to start it {mod, {rabbit, []}}, - {env, [{tcp_listeners, [{"0.0.0.0", 5672}]}, + {env, [{tcp_listeners, [5672]}, {ssl_listeners, []}, {ssl_options, []}, {vm_memory_high_watermark, 0.4}, + {msg_store_index_module, rabbit_msg_store_ets_index}, + {backing_queue_module, rabbit_variable_queue}, + {persister_max_wrap_entries, 500}, + {persister_hibernate_after, 10000}, + {msg_store_file_size_limit, 16777216}, + {queue_index_max_journal_entries, 262144}, {default_user, <<"guest">>}, {default_pass, <<"guest">>}, + {default_user_is_admin, true}, {default_vhost, <<"/">>}, - {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}]}]}. + {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, + {cluster_nodes, []}, + {server_properties, []}, + {collect_statistics, none}, + {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + {auth_backends, [rabbit_auth_backend_internal]}, + {delegate_count, 16}]}]}. diff -Nru rabbitmq-server-1.7.2/generate_deps rabbitmq-server-2.3.1/generate_deps --- rabbitmq-server-1.7.2/generate_deps 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/generate_deps 2011-02-03 12:47:36.000000000 +0000 @@ -2,18 +2,21 @@ %% -*- erlang -*- -mode(compile). -main([IncludeDir, ErlDir, EbinDir, TargetFile]) -> - ErlDirContents = filelib:wildcard("*.erl", ErlDir), - ErlFiles = [filename:join(ErlDir, FileName) || FileName <- ErlDirContents], +%% We expect the list of Erlang source and header files to arrive on +%% stdin, with the entries colon-separated. 
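%% For illustration only (hypothetical invocation; file names are
%% placeholders): the build is expected to pipe in a single line such as
%%
%%   echo "src/rabbit.erl:src/rabbit_log.erl:include/rabbit.hrl" \
%%     | escript generate_deps deps.mk ebin
%%
%% i.e. the .erl and .hrl paths joined by colons on stdin, with the
%% dependency file to generate and the ebin directory as the two arguments;
%% leading whitespace before each entry is ignored.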
+main([TargetFile, EbinDir]) -> + ErlsAndHrls = [ string:strip(S,left) || + S <- string:tokens(io:get_line(""), ":\n")], + ErlFiles = [F || F <- ErlsAndHrls, lists:suffix(".erl", F)], Modules = sets:from_list( [list_to_atom(filename:basename(FileName, ".erl")) || - FileName <- ErlDirContents]), - Headers = sets:from_list( - [filename:join(IncludeDir, FileName) || - FileName <- filelib:wildcard("*.hrl", IncludeDir)]), + FileName <- ErlFiles]), + HrlFiles = [F || F <- ErlsAndHrls, lists:suffix(".hrl", F)], + IncludeDirs = lists:usort([filename:dirname(Path) || Path <- HrlFiles]), + Headers = sets:from_list(HrlFiles), Deps = lists:foldl( fun (Path, Deps1) -> - dict:store(Path, detect_deps(IncludeDir, EbinDir, + dict:store(Path, detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path), Deps1) end, dict:new(), ErlFiles), @@ -23,19 +26,21 @@ ok; (Path, Dep, ok) -> Module = filename:basename(Path, ".erl"), - ok = file:write(Hdl, [EbinDir, "/", Module, ".beam:"]), + ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ", + Path]), ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end, ok, Dep), - file:write(Hdl, [" ", ErlDir, "/", Module, ".erl\n"]) + file:write(Hdl, ["\n"]) end, ok, Deps), ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]), ok = file:sync(Hdl), ok = file:close(Hdl). -detect_deps(IncludeDir, EbinDir, Modules, Headers, Path) -> - {ok, Forms} = epp:parse_file(Path, [IncludeDir], [{use_specs, true}]), +detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path) -> + {ok, Forms} = epp:parse_file(Path, IncludeDirs, [{use_specs, true}]), lists:foldl( - fun ({attribute, _LineNumber, behaviour, Behaviour}, Deps) -> + fun ({attribute, _LineNumber, Attribute, Behaviour}, Deps) + when Attribute =:= behaviour orelse Attribute =:= behavior -> case sets:is_element(Behaviour, Modules) of true -> sets:add_element( [EbinDir, "/", atom_to_list(Behaviour), ".beam"], diff -Nru rabbitmq-server-1.7.2/include/rabbit_auth_backend_spec.hrl rabbitmq-server-2.3.1/include/rabbit_auth_backend_spec.hrl --- rabbitmq-server-1.7.2/include/rabbit_auth_backend_spec.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_auth_backend_spec.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,32 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-spec(description/0 :: () -> [{atom(), any()}]). + +-spec(check_user_login/2 :: (rabbit_types:username(), [term()]) -> + {'ok', rabbit_types:user()} | + {'refused', string(), [any()]} | + {'error', any()}). +-spec(check_vhost_access/3 :: (rabbit_types:user(), rabbit_types:vhost(), + rabbit_access_control:vhost_permission_atom()) -> + boolean() | {'error', any()}). +-spec(check_resource_access/3 :: (rabbit_types:user(), + rabbit_types:r(atom()), + rabbit_access_control:permission_atom()) -> + boolean() | {'error', any()}). +-endif. 
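The specs above describe the pluggable authentication/authorisation backend
callbacks selected via the auth_backends application parameter (see the
rabbit_app.in changes elsewhere in this patch). A minimal sketch of a backend
that accepts every login and grants all permissions; the module name is
hypothetical and the behaviour name is an assumption, though the callback
shapes and the #user{} fields follow the headers shown in this patch:

    %% illustration only -- a hypothetical "allow everything" backend
    -module(rabbit_auth_backend_allow_all_example).
    -include("rabbit.hrl").

    -behaviour(rabbit_auth_backend).   %% assumed behaviour module name

    -export([description/0, check_user_login/2,
             check_vhost_access/3, check_resource_access/3]).

    description() ->
        [{name, <<"AllowAllExample">>},
         {description, "accepts every login; for illustration only"}].

    check_user_login(Username, _AuthProps) ->
        %% #user.impl is scratch space for the backend; nothing kept here
        {ok, #user{username     = Username,
                   is_admin     = false,
                   auth_backend = ?MODULE,
                   impl         = none}}.

    check_vhost_access(_User, _VHost, _Permission)       -> true.
    check_resource_access(_User, _Resource, _Permission) -> true.

    %% Such a backend would (hypothetically) be enabled via rabbitmq.config:
    %%   [{rabbit, [{auth_backends, [rabbit_auth_backend_allow_all_example]}]}].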
diff -Nru rabbitmq-server-1.7.2/include/rabbit_auth_mechanism_spec.hrl rabbitmq-server-2.3.1/include/rabbit_auth_mechanism_spec.hrl --- rabbitmq-server-1.7.2/include/rabbit_auth_mechanism_spec.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_auth_mechanism_spec.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,27 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-spec(description/0 :: () -> [{atom(), any()}]). +-spec(init/1 :: (rabbit_net:socket()) -> any()). +-spec(handle_response/2 :: (binary(), any()) -> + {'ok', rabbit_types:user()} | + {'challenge', binary(), any()} | + {'protocol_error', string(), [any()]} | + {'refused', string(), [any()]}). + +-endif. diff -Nru rabbitmq-server-1.7.2/include/rabbit_backing_queue_spec.hrl rabbitmq-server-2.3.1/include/rabbit_backing_queue_spec.hrl --- rabbitmq-server-1.7.2/include/rabbit_backing_queue_spec.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_backing_queue_spec.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,67 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-type(fetch_result(Ack) :: + ('empty' | + %% Message, IsDelivered, AckTag, Remaining_Len + {rabbit_types:basic_message(), boolean(), Ack, non_neg_integer()})). +-type(is_durable() :: boolean()). +-type(attempt_recovery() :: boolean()). +-type(purged_msg_count() :: non_neg_integer()). +-type(confirm_required() :: boolean()). +-type(message_properties_transformer() :: + fun ((rabbit_types:message_properties()) + -> rabbit_types:message_properties())). + +-spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). +-spec(stop/0 :: () -> 'ok'). +-spec(init/3 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery()) -> + state()). +-spec(terminate/1 :: (state()) -> state()). +-spec(delete_and_terminate/1 :: (state()) -> state()). +-spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). +-spec(publish/3 :: (rabbit_types:basic_message(), + rabbit_types:message_properties(), state()) -> state()). +-spec(publish_delivered/4 :: (true, rabbit_types:basic_message(), + rabbit_types:message_properties(), state()) + -> {ack(), state()}; + (false, rabbit_types:basic_message(), + rabbit_types:message_properties(), state()) + -> {undefined, state()}). 
+-spec(dropwhile/2 :: + (fun ((rabbit_types:message_properties()) -> boolean()), state()) + -> state()). +-spec(fetch/2 :: (true, state()) -> {fetch_result(ack()), state()}; + (false, state()) -> {fetch_result(undefined), state()}). +-spec(ack/2 :: ([ack()], state()) -> state()). +-spec(tx_publish/4 :: (rabbit_types:txn(), rabbit_types:basic_message(), + rabbit_types:message_properties(), state()) -> state()). +-spec(tx_ack/3 :: (rabbit_types:txn(), [ack()], state()) -> state()). +-spec(tx_rollback/2 :: (rabbit_types:txn(), state()) -> {[ack()], state()}). +-spec(tx_commit/4 :: + (rabbit_types:txn(), fun (() -> any()), + message_properties_transformer(), state()) -> {[ack()], state()}). +-spec(requeue/3 :: ([ack()], message_properties_transformer(), state()) + -> state()). +-spec(len/1 :: (state()) -> non_neg_integer()). +-spec(is_empty/1 :: (state()) -> boolean()). +-spec(set_ram_duration_target/2 :: + (('undefined' | 'infinity' | number()), state()) -> state()). +-spec(ram_duration/1 :: (state()) -> {number(), state()}). +-spec(needs_idle_timeout/1 :: (state()) -> boolean()). +-spec(idle_timeout/1 :: (state()) -> state()). +-spec(handle_pre_hibernate/1 :: (state()) -> state()). +-spec(status/1 :: (state()) -> [{atom(), any()}]). diff -Nru rabbitmq-server-1.7.2/include/rabbit_exchange_type_spec.hrl rabbitmq-server-2.3.1/include/rabbit_exchange_type_spec.hrl --- rabbitmq-server-1.7.2/include/rabbit_exchange_type_spec.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_exchange_type_spec.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,36 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-spec(description/0 :: () -> [{atom(), any()}]). +-spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) + -> rabbit_router:match_result()). +-spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). +-spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). +-spec(recover/2 :: (rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). +-spec(delete/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). +-spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), + rabbit_types:binding()) -> 'ok'). +-spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). +-spec(assert_args_equivalence/2 :: + (rabbit_types:exchange(), rabbit_framing:amqp_table()) + -> 'ok' | rabbit_types:connection_exit()). + +-endif. 
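The specs above form the pluggable exchange type interface. As a sketch, a
trivial exchange type that accepts every declaration but never routes a
message anywhere; the module name is hypothetical, the behaviour name is an
assumption, and a real type would normally look up bindings (e.g. via
rabbit_router) in route/2 rather than returning the empty list:

    %% illustration only -- a hypothetical exchange type that drops messages
    -module(rabbit_exchange_type_blackhole_example).
    -include("rabbit.hrl").

    -behaviour(rabbit_exchange_type).   %% assumed behaviour module name

    -export([description/0, route/2, validate/1, create/2, recover/2,
             delete/3, add_binding/3, remove_bindings/3,
             assert_args_equivalence/2]).

    description() ->
        [{name, <<"blackhole-example">>},
         {description, "routes nothing; for illustration only"}].

    %% route/2 returns the destinations for a delivery; the empty list
    %% means the message is simply discarded by this exchange.
    route(#exchange{}, _Delivery) -> [].

    %% The remaining callbacks only need to acknowledge lifecycle events.
    validate(_X)                        -> ok.
    create(_Tx, _X)                     -> ok.
    recover(_X, _Bindings)              -> ok.
    delete(_Tx, _X, _Bindings)          -> ok.
    add_binding(_Tx, _X, _Binding)      -> ok.
    remove_bindings(_Tx, _X, _Bindings) -> ok.
    assert_args_equivalence(_X, _Args)  -> ok.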
diff -Nru rabbitmq-server-1.7.2/include/rabbit_framing_spec.hrl rabbitmq-server-2.3.1/include/rabbit_framing_spec.hrl --- rabbitmq-server-1.7.2/include/rabbit_framing_spec.hrl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_framing_spec.hrl 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - -%% TODO: much of this should be generated - --type(amqp_field_type() :: - 'longstr' | 'signedint' | 'decimal' | 'timestamp' | - 'table' | 'byte' | 'double' | 'float' | 'long' | - 'short' | 'bool' | 'binary' | 'void'). --type(amqp_property_type() :: - 'shortstr' | 'longstr' | 'octet' | 'shortint' | 'longint' | - 'longlongint' | 'timestamp' | 'bit' | 'table'). -%% we could make this more precise but ultimately are limited by -%% dialyzer's lack of support for recursive types --type(amqp_table() :: [{binary(), amqp_field_type(), any()}]). -%% TODO: make this more precise --type(amqp_class_id() :: non_neg_integer()). -%% TODO: make this more precise --type(amqp_properties() :: tuple()). -%% TODO: make this more precise --type(amqp_method() :: tuple()). -%% TODO: make this more precise --type(amqp_method_name() :: atom()). --type(channel_number() :: non_neg_integer()). --type(resource_name() :: binary()). --type(routing_key() :: binary()). --type(username() :: binary()). --type(password() :: binary()). --type(vhost() :: binary()). --type(ctag() :: binary()). --type(exchange_type() :: 'direct' | 'topic' | 'fanout'). --type(binding_key() :: binary()). diff -Nru rabbitmq-server-1.7.2/include/rabbit.hrl rabbitmq-server-2.3.1/include/rabbit.hrl --- rabbitmq-server-1.7.2/include/rabbit.hrl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -1,42 +1,34 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% --record(user, {username, password}). +-record(user, {username, + is_admin, + auth_backend, %% Module this user came from + impl %% Scratch space for that module + }). + +-record(internal_user, {username, password_hash, is_admin}). -record(permission, {configure, write, read}). -record(user_vhost, {username, virtual_host}). -record(user_permission, {user_vhost, permission}). -record(vhost, {virtual_host, dummy}). --record(connection, {user, timeout_sec, frame_max, vhost, client_properties}). +-record(connection, {protocol, user, timeout_sec, frame_max, vhost, + client_properties}). -record(content, {class_id, @@ -44,134 +36,50 @@ properties_bin, %% either 'none', or an encoded properties binary %% Note: at most one of properties and properties_bin can be %% 'none' at once. + protocol, %% The protocol under which properties_bin was encoded payload_fragments_rev %% list of binaries, in reverse order (!) }). -record(resource, {virtual_host, kind, name}). --record(exchange, {name, type, durable, auto_delete, arguments}). +-record(exchange, {name, type, durable, auto_delete, internal, arguments}). --record(amqqueue, {name, durable, auto_delete, arguments, pid}). +-record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, + arguments, pid}). %% mnesia doesn't like unary records, so we add a dummy 'value' field -record(route, {binding, value = const}). -record(reverse_route, {reverse_binding, value = const}). --record(binding, {exchange_name, key, queue_name, args = []}). --record(reverse_binding, {queue_name, key, exchange_name, args = []}). +-record(binding, {source, key, destination, args = []}). +-record(reverse_binding, {destination, key, source, args = []}). --record(listener, {node, protocol, host, port}). +-record(listener, {node, protocol, host, ip_address, port}). --record(basic_message, {exchange_name, routing_key, content, persistent_key}). +-record(basic_message, {exchange_name, routing_key, content, guid, + is_persistent}). 
-record(ssl_socket, {tcp, ssl}). --record(delivery, {mandatory, immediate, txn, sender, message}). - --record(amqp_error, {name, explanation, method = none}). +-record(delivery, {mandatory, immediate, txn, sender, message, + msg_seq_no}). +-record(amqp_error, {name, explanation = "", method = none}). -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --include("rabbit_framing_spec.hrl"). +-record(event, {type, props, timestamp}). --type(maybe(T) :: T | 'none'). --type(erlang_node() :: atom()). --type(ssl_socket() :: #ssl_socket{}). --type(socket() :: port() | ssl_socket()). --type(thunk(T) :: fun(() -> T)). --type(info_key() :: atom()). --type(info() :: {info_key(), any()}). --type(regexp() :: binary()). - -%% this is really an abstract type, but dialyzer does not support them --type(guid() :: any()). --type(txn() :: guid()). --type(pkey() :: guid()). --type(r(Kind) :: - #resource{virtual_host :: vhost(), - kind :: Kind, - name :: resource_name()}). --type(queue_name() :: r('queue')). --type(exchange_name() :: r('exchange')). --type(user() :: - #user{username :: username(), - password :: password()}). --type(permission() :: - #permission{configure :: regexp(), - write :: regexp(), - read :: regexp()}). --type(amqqueue() :: - #amqqueue{name :: queue_name(), - durable :: boolean(), - auto_delete :: boolean(), - arguments :: amqp_table(), - pid :: maybe(pid())}). --type(exchange() :: - #exchange{name :: exchange_name(), - type :: exchange_type(), - durable :: boolean(), - auto_delete :: boolean(), - arguments :: amqp_table()}). --type(binding() :: - #binding{exchange_name :: exchange_name(), - queue_name :: queue_name(), - key :: binding_key()}). -%% TODO: make this more precise by tying specific class_ids to -%% specific properties --type(undecoded_content() :: - #content{class_id :: amqp_class_id(), - properties :: 'none', - properties_bin :: binary(), - payload_fragments_rev :: [binary()]} | - #content{class_id :: amqp_class_id(), - properties :: amqp_properties(), - properties_bin :: 'none', - payload_fragments_rev :: [binary()]}). --type(unencoded_content() :: undecoded_content()). --type(decoded_content() :: - #content{class_id :: amqp_class_id(), - properties :: amqp_properties(), - properties_bin :: maybe(binary()), - payload_fragments_rev :: [binary()]}). --type(encoded_content() :: - #content{class_id :: amqp_class_id(), - properties :: maybe(amqp_properties()), - properties_bin :: binary(), - payload_fragments_rev :: [binary()]}). --type(content() :: undecoded_content() | decoded_content()). --type(basic_message() :: - #basic_message{exchange_name :: exchange_name(), - routing_key :: routing_key(), - content :: content(), - persistent_key :: maybe(pkey())}). --type(message() :: basic_message()). --type(delivery() :: - #delivery{mandatory :: boolean(), - immediate :: boolean(), - txn :: maybe(txn()), - sender :: pid(), - message :: message()}). -%% this really should be an abstract type --type(msg_id() :: non_neg_integer()). --type(msg() :: {queue_name(), pid(), msg_id(), boolean(), message()}). --type(listener() :: - #listener{node :: erlang_node(), - protocol :: atom(), - host :: string() | atom(), - port :: non_neg_integer()}). --type(not_found() :: {'error', 'not_found'}). --type(routing_result() :: 'routed' | 'unroutable' | 'not_delivered'). --type(amqp_error() :: - #amqp_error{name :: atom(), - explanation :: string(), - method :: atom()}). --endif. +-record(message_properties, {expiry, needs_confirming = false}). 
%%---------------------------------------------------------------------------- --define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2010 LShift Ltd., Cohesive Financial Technologies LLC., and Rabbit Technologies Ltd."). +-define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2011 VMware, Inc."). -define(INFORMATION_MESSAGE, "Licensed under the MPL. See http://www.rabbitmq.com/"). +-define(PROTOCOL_VERSION, "AMQP 0-9-1 / 0-9 / 0-8"). +-define(ERTS_MINIMUM, "5.6.3"). + +-define(MAX_WAIT, 16#ffffffff). + +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). +-define(STATS_INTERVAL, 5000). -ifdef(debug). -define(LOGDEBUG0(F), rabbit_log:debug(F)). diff -Nru rabbitmq-server-1.7.2/include/rabbit_msg_store.hrl rabbitmq-server-2.3.1/include/rabbit_msg_store.hrl --- rabbitmq-server-1.7.2/include/rabbit_msg_store.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_msg_store.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,26 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-include("rabbit.hrl"). + +-ifdef(use_specs). + +-type(msg() :: any()). + +-endif. + +-record(msg_location, + {guid, ref_count, file, offset, total_size}). diff -Nru rabbitmq-server-1.7.2/include/rabbit_msg_store_index.hrl rabbitmq-server-2.3.1/include/rabbit_msg_store_index.hrl --- rabbitmq-server-1.7.2/include/rabbit_msg_store_index.hrl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/include/rabbit_msg_store_index.hrl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,45 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-include("rabbit_msg_store.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(dir() :: any()). +-type(index_state() :: any()). +-type(keyvalue() :: any()). +-type(fieldpos() :: non_neg_integer()). +-type(fieldvalue() :: any()). + +-spec(new/1 :: (dir()) -> index_state()). +-spec(recover/1 :: (dir()) -> rabbit_types:ok_or_error2(index_state(), any())). +-spec(lookup/2 :: + (rabbit_guid:guid(), index_state()) -> ('not_found' | keyvalue())). +-spec(insert/2 :: (keyvalue(), index_state()) -> 'ok'). +-spec(update/2 :: (keyvalue(), index_state()) -> 'ok'). 
+-spec(update_fields/3 :: (rabbit_guid:guid(), ({fieldpos(), fieldvalue()} | + [{fieldpos(), fieldvalue()}]), + index_state()) -> 'ok'). +-spec(delete/2 :: (rabbit_guid:guid(), index_state()) -> 'ok'). +-spec(delete_object/2 :: (keyvalue(), index_state()) -> 'ok'). +-spec(delete_by_file/2 :: (fieldvalue(), index_state()) -> 'ok'). +-spec(terminate/1 :: (index_state()) -> any()). + +-endif. + +%%---------------------------------------------------------------------------- diff -Nru rabbitmq-server-1.7.2/INSTALL rabbitmq-server-2.3.1/INSTALL --- rabbitmq-server-1.7.2/INSTALL 2010-02-15 16:01:08.000000000 +0000 +++ rabbitmq-server-2.3.1/INSTALL 2011-02-03 12:47:36.000000000 +0000 @@ -8,251 +8,380 @@ =========================================================================== - Messaging that just works - RabbitMQ - * Home - * Download - * Documentation - * Get Started - * Services - * Partners - * Community - * Cloud - * FAQ - * Search + SpringSource + __________________________ [ ] + + * Download + * Documentation + * Get Started + * Services + * Community + * Blog + + Here you can find instructions to get your server download installed and + running for your chosen platform. + + RabbitMQ Server Installation + + * Introduction + * Windows + + * Install from the Windows Bundle + + * Install Server Components + + * Find Command Scripts and App Data + + * Run RabbitMQ Server as an Application + + * Run RabbitMQ Server as a Service + + * Manage the Broker - -------------------------------------------------------------------------- + * Logging - This section contains server installation instructions for various - platforms. You can download example programs, utilities, Java client - libraries and bindings to various third-party management, monitoring and - routing tools from http://www.rabbitmq.com/. + * Debian GNU/Linux and Ubuntu Linux - Table of Contents + * Install the Server - * Windows + * Run RabbitMQ Server - * Installation from a complete bundle + * RPM based Linux (CentOS, Fedora, OpenSuse, RedHat) - * Installation from separate components + * Install the Server - * Running the Server as an Application + * Run RabbitMQ Server - * Running the Server as a Service + * Mac OS X with MacPorts - * Logging + * Install the Server - * Debian GNU/Linux and Ubuntu Linux + * Run RabbitMQ Server - * Installation Instructions + * Mac OS X with Homebrew - * Running the Server + * Install the Server - * Logging + * Run RabbitMQ Server - * Configuration File + * Solaris - * RPM based Linux (CentOS, Fedora, OpenSuse, RedHat, etc.) + * Installation Instructions - * Installation Instructions + * Generic Unix or Linux (BSD, Mac OS X) - * Running the Server + * Install the Server - * Logging + * Run RabbitMQ Server - * Mac OS X with MacPorts + * Manage the Server - * Installation Instructions + * Logging - * Running the Server + * Troubleshooting + * Configuration File - * Generic Unix or Linux (BSD, Mac OS X, etc.) +Introduction - * Installation Instructions + Each installation requires Erlang, which also provides Mnesia (DBMS) and + logging support for the brokers. Mnesia and log directories may need + read/write permissions for the service account. - * Running the Server + Management scripts start and stop the broker, and set up a default + environment. Scripts and commands need to run as a privileged user e.g. an + administrator on Windows, or with root permission (e.g. sudo) on Unix-like + systems. 
- * Logging + If you use non-default file locations, account or service names or ports, + these environment variables should be edited as described. + Other configuration may be defined in a .config file using the + Configuration example. - * Troubleshooting - * Configuration File + These are the steps, done manually or via a packager: + + 1. Install Erlang + 2. Install RabbitMQ server + 3. Configure environment + 4. Start service or daemon Windows - Installation from a complete bundle + You can either use files from the Windows bundle, or download Erlang and + the RabbitMQ server as indicated below. + + Install from the Windows Bundle + + For convenience, the Windows Bundle zip file already contains the Erlang + system installer and the RabbitMQ broker server binary distribution files + needed for the next section. It also includes .Net and Java client + libraries for RabbitMQ. + + Just extract the files from the bundle and then follow the instructions to + install each server component. + + Install Server Components + + Install Erlang + + Install a recent version of Erlang/OTP. Versions R13B03 and higher + should work on Windows. Extract it from the Windows bundle or + choose a version from http://www.erlang.org/download.html + + Run the .exe file. Using default installation options, Erlang will + appear in the Start Menu, and \erl5.7.4\bin\erl.exe will be in + C:\Program Files or C:\Program Files (x86). + + ERLANG_HOME + + Set ERLANG_HOME to where you actually put your Erlang + installation, e.g. C:\Program Files\erl5.7.4 (full path). The + RabbitMQ batch files will attempt to execute + %ERLANG_HOME%\bin\erl.exe. + + Go to Start>Settings>Control Panel>System>Advanced>Environment + Variables. Create the system environment variable ERLANG_HOME and + set it to the full path of the directory which contains + bin\erl.exe. + + Install RabbitMQ Server + + Download rabbitmq-server-windows-2.3.1.zip from the download page + or extract it from the Windows bundle. + + From the zip file, extract the folder named rabbitmq_server-2.3.1 + into C:\Program Files\RabbitMQ (or somewhere suitable for + application files). + + Find Command Scripts and App Data + + Commands for RabbitMQ Server + + Within the rabbitmq_server-2.3.1\sbin directory are some scripts + which run commands to control the RabbitMQ server. + + The RabbitMQ server can be run as either an application or service + (not both). + + * rabbitmq-server.bat starts the broker as an application. + * rabbitmq-service.bat manages the service and starts the + broker. + * rabbitmqctl.bat manages a running broker. + + Log in as an administrator. To see the output, run these from a + Command Prompt in the sbin directory. + + Note: On Vista/Windows7 you will need to elevate privilege (e.g. + right-click on the icon to select Run as Administrator). + + Find Commands Easily + Set up the system path so you can find the server and sbin + directory easily. + * Create a system environment variable (e.g. RABBITMQ_SERVER) for + "C:\Program Files\RabbitMQ\rabbitmq_server-2.3.1". Adjust this + if you put rabbitmq_server-2.3.1 elsewhere, or if you upgrade + versions. + * Append the literal string ";%RABBITMQ_SERVER%\sbin" to your + system path (aka %PATH%). Now you can run rabbitmq commands from + any (administrator) Command Prompt. + + You will need to navigate to rabbitmq_server-2.3.1\sbin to run + commands if your system path does not contain the RabbitMQ sbin + directory. 
+ + Finding Application Data + + By default, the RabbitMQ logs and Mnesia database are stored in + the current user's Application Data directory e.g. C:\Documents + and Settings\User\Application Data or C:\Documents and + Settings\User\AppData\Roaming (Windows Vista). + + Execute echo %APPDATA% at a Command Prompt to find this directory. + Alternatively, Start > Run %APPDATA% will open this folder. + + Setting Environment Variables on Windows + + If you need to customise names, ports, locations, it is easiest to + configure environment variables in the Windows dialogue: + Start > Settings > Control Panel > System > Advanced > Environmment Variables. + Create or edit the system or user variable name and value. After + changes you should restart the broker. + + Run RabbitMQ Server as an Application + + The application is started by the rabbitmq-server.bat script in sbin. + + Customise RabbitMQ Server Environment Variables + You may need to customise some environment variables for your + installation. Environment variable defaults set within the + rabbitmq-server.bat file are: + + RABBITMQ_BASE + Defaults to the RabbitMQ directory in %APPDATA%. This base + directory contains sub-directories for the RabbitMQ server's + database and log files. Alternatively, set RABBITMQ_MNESIA_BASE + and RABBITMQ_LOG_BASE individually. + + RABBITMQ_MNESIA_BASE + Defaults to %RABBITMQ_BASE%\db. Set this to the directory where + Mnesia database files should be placed. + + RABBITMQ_LOG_BASE + Defaults to %RABBITMQ_BASE%\log. Log files generated by the server + will be placed in this directory. + + COMPUTERNAME + Defaults to localhost. + + RABBITMQ_NODENAME + Defaults to rabbit@%COMPUTERNAME%. This can be useful if you want + to run more than one node per machine - RABBITMQ_NODENAME should + be unique per erlang-node-and-machine combination. See clustering + on a single machine for more. + + RABBITMQ_NODE_IP_ADDRESS + Defaults to the empty string - meaning bind to all network + interfaces. This can be changed if you only want to bind to one + network interface. + + RABBITMQ_NODE_PORT + Defaults to 5672. + + RABBITMQ_CONFIG_FILE + Defaults to %RABBITMQ_BASE%\rabbitmq. If this file is present it + is used by the server to configure RabbitMQ components. The + .config extension is automatically appended by the Erlang runtime. + See the section on the configuration file for details. This file + is also used to auto-configure RabbitMQ clusters. See the + clustering guide for details. + + Start the Broker as an Application + + Run the command + + rabbitmq-server -detached + + Alternatively, you can double-click the rabbitmq-server.bat file + in Windows Explorer. + + If you start by double-clicking, a Command Prompt window opens, + displays a banner message, reports on progress in the startup + sequence, and concludes with "broker running". This shows that the + RabbitMQ broker has been started successfully. + + If you started without the -detached option, e.g. by + double-clicking, you will need a second Command Prompt window to + control the application cleanly. Note: Closing the original + Command Prompt window will forcefully shut down a server started + this way. + + Stop or Manage the Broker + rabbitmq-server only starts the broker. To manage the broker use + rabbitmqctl commands. + + Run RabbitMQ Server as a Service + + The service will run in the security context of the system account without + the need for a user to be logged in on a console. This is normally more + appropriate for production use. 
The server should not be run as a service + and application simultaneously. + + The service runs using the rabbitmq-service.bat script in sbin. + + Customise RabbitMQ Service Environment Variables + + Note: After setting environment variables, you may need to install + the service again. + + The rabbitmq-service.bat script recognises many of the same + environment variables as rabbitmq-server.bat (see the previous + section), as well as a few additional ones. Of particular + importance are: + + ERLANG_SERVICE_MANAGER_PATH + The default value of this path is C:\Program + Files\erl5.7.4\erts-5.7.4\bin. This is the location of erlsrv.exe, + the Erlang service wrapper script. + + RABBITMQ_SERVICENAME + Defaults to RabbitMQ. This name will appear in the list of + services reported by the operating system. + + RABBITMQ_CONSOLE_LOG + Set this variable to new or reuse to redirect console output from + the server to a file named %RABBITMQ_SERVICENAME%.debug in the + default RABBITMQ_BASE directory. + + * If not set, console output from the server will be discarded + (default). + * new A new file will be created each time the service starts. + * reuse The file will be overwritten each time the service + starts. + + Install the Service + + Install the service by running + + rabbitmq-service install + + A service with the name defined by RABBITMQ_SERVICENAME should now + appear in the Windows Services control panel + (Start > Run services.msc). + + Managing the Service + + To manage the service (install, remove, start, stop, enable, + disable), use rabbitmq-service.bat commands. Refer to the service + guide for more information and troubleshooting guidance. You can + also use the Windows Services panel (services.msc) to perform some + of the same functions as the service script. + + Start the Broker as a Service + + To start the broker, execute + + rabbitmq-service start + + If the output from this command is "Service RABBITMQ_SERVICENAME + started", then the service was started correctly. + + Confirm the service named RABBITMQ_SERVICENAME reports a "Started" + status in Services: + Start > Run services.msc. + + Manage the Broker + To manage the broker use rabbitmqctl commands. + + Manage the Broker + + To stop the broker or check its status, use rabbitmqctl.bat in sbin (as an + administrator). - The bundle zip file contains all the files listed in the next section, - including an Erlang system installer and the RabbitMQ broker binary - distribution, the .Net client, as well as an archive of the Java - librabbitmq client library. - - Installation from separate components - - * Install a recent version of Erlang. Any recent version should work, - but we are developing against Erlang R12B-5 on Windows, which can be - downloaded from http://www.erlang.org/download/otp_win32_R12B-5.exe - * Download rabbitmq-server-version.zip, where version is the version of - RabbitMQ Server you want to install. - * Contained in the zip file is a directory named - rabbitmq_server-version. You should extract this folder to somewhere - appropriate for application files, for example C:\Program - Files\RabbitMQ. Once the files are successfully extracted, you should - end up with a structure similar to C:\Program - Files\RabbitMQ\rabbitmq_server-N.N.N. You will also need to set the - environment variable ERLANG_HOME to point to your Erlang directory - (e.g. C:\Program Files\erl5.7.4). 
- * Within the sbin directory is a file rabbitmq-server.bat which may need - customisation for your installation, especially if you use a different - version of Erlang. Environment variables set within the batch file are - - ERLANG_HOME - Set this to the base directory of your Erlang - installation, e.g. C:\Program Files\erl5.7.4. The - RabbitMQ batch file expects the file bin\erl.exe to be - present at this location. The default setting determines - the base directory from the script location, which will - work in most cases. - - RABBITMQ_BASE - Defaults to the directory RabbitMQ in the user's - application data directory, e.g. C:\Documents and - Settings\User\Application Data\RabbitMQ. Under Windows - Vista the location would be slightly different, e.g. - C:\Documents and Settings\User\AppData\Roaming\RabbitMQ - This is the base directory containing sub-directories for - the RabbitMQ Server's database and log files. - Alternatively, set RABBITMQ_MNESIA_BASE and - RABBITMQ_LOG_BASE individually. - - RABBITMQ_MNESIA_BASE - Defaults to %RABBITMQ_BASE%\db. Set this to the directory - where Mnesia database files should be placed. - - RABBITMQ_LOG_BASE - Defaults to %RABBITMQ_BASE%\log. Log files generated by - the server will be placed in this directory. - - RABBITMQ_NODENAME - Defaults to rabbit. This can be useful if you want to run - more than one node per machine - RABBITMQ_NODENAME should - be unique per erlang-node-and-machine combination. See - clustering on a single machine for more. - - RABBITMQ_NODE_IP_ADDRESS - Defaults to 0.0.0.0. This can be changed if you only want - to bind to one network interface. Please note that - Windows 2000 (and maybe earlier versions) do not - understand "0.0.0.0" and will need to have this set - explicitly. - - RABBITMQ_NODE_PORT - Defaults to 5672. - - RABBITMQ_CLUSTER_CONFIG_FILE - Defaults to %RABBITMQ_BASE%\rabbitmq_cluster.config. If - this file is present it is used by the server to - auto-configure a RabbitMQ cluster. See the clustering - guide for details. - - RABBITMQ_CONFIG_FILE - Defaults to %RABBITMQ_BASE%\rabbitmq. If this file is - present it is used by the server to configure RabbitMQ - application components. Note that the .config extension - is automatically appended by the Erlang runtime. - - Running the Server as an Application - - Currently, it's sufficient to change directory to - rabbitmq_server-version\sbin and double-click the rabbitmq-server.bat - file. - - The shell window that results displays a banner message, and reports on - progress in the startup sequence, concluding with the message "broker - running", indicating that the RabbitMQ broker has been started - successfully. To shut down the server, close the shell window. - Alternatively, use the rabbitmqctl stop command. - - Running the Server as a Service - - The server can be run as a service instead of an application. The server - will run in the security context of the system account, without the need - for a user to be logged in on a console. The server should not be run as a - service and application simultaneously. - - * The rabbitmq-service.bat script in the sbin directory is used to - manage the service. The rabbitmq-service.bat script recognises many of - the same environment variables as rabbitmq-server.bat (see the - previous section), as well as a few additional ones. Of particular - importance are: - - RABBITMQ_BASE - This is the base directory containing sub-directories for - database and log files. 
The account that the service will - run as must have read and write access to this location. - The default location will not normally meet this - requirement, so this variable must be set explicitly and - the permissions must be verified. - - ERLANG_SERVICE_MANAGER_PATH - The default value of this path is C:\Program - Files\erl5.7.4\erts-5.7.4\bin. The given path must - contain the erlsrv.exe Erlang service wrapper script. - - RABBITMQ_SERVICENAME - This name will appear in the list of services reported by - the operating system. - - RABBITMQ_CONSOLE_LOG - Set this variable to new or reuse to have the console - output from the server redirected to a file named - RABBITMQ_SERVICENAME.debug in the default RABBITMQ_BASE - directory. If RABBITMQ_CONSOLE_LOG is set to new then a - new file will be created each time the service starts. If - RABBITMQ_CONSOLE_LOG is set to reuse then the file will - be overwritten each time the service starts. If - RABBITMQ_CONSOLE_LOG is unset then console output from - the server will be discarded. - - * Log in as an administrator and open a command shell (cmd.exe) in the - sbin directory where the broker was installed. Install the service by - executing - - rabbitmq-service.bat install - - * Ensure that a service with the name RABBITMQ_SERVICENAME now appears - in the services control panel (services.msc). Confirm that the service - account has full access to the RABBITMQ_BASE, RABBITMQ_MNESIA_BASE and - RABBITMQ_LOG_BASE directories. - * If the service account is different from the account that will run the - rabbitmqctl.bat script then the Erlang security cookies for those - accounts must be synchronised before running rabbitmqctl.bat. The - cookie is stored in a file named .erlang.cookie and stored in the home - or %APPDATA% directory of the account. Local service account cookies - are normally stored in C:\WINDOWS\.erlang.cookie while user account - cookies are typically stored in C:\Documents and - Settings\User\.erlang.cookie. The contents of the two cookie files - must be identical. - * Execute - - rabbitmq-service.bat start - - to start the server. If the output from this command is "Service - RABBITMQ_SERVICENAME started", then the service was started correctly. - If the output reads "The process terminated unexpectedly" instead, - then the service did not start correctly. Check that the environment - variables are set correctly. The logfiles in RABBITMQ_BASE may also - contain useful diagnostic information. - * Confirm that the service list in the operating system service list - (services.msc) reports a "Started" status for the service named - RABBITMQ_SERVICENAME. - * Ensure that the Erlang cookie file created by the service contains the - same string as the one used by the account that will subsequently - execute the rabbitmqctl.bat script. + Synchronise Erlang Cookies (when running a Windows Service) - Refer to the service guide for a complete description of - rabbitmq-service.bat arguments. + Erlang Security Cookies used by the service account and the user + running rabbitmqctl.bat must be synchronised for rabbitmqctl.bat + to function. + + To ensure Erlang cookie files contain the same string, copy the + .erlang.cookie file from the windows directory + (C:\WINDOWS\.erlang.cookie) to replace the user .erlang.cookie. + The user cookie will be either in %APPDATA% or the user's home + directory e.g. C:\Documents and Settings\User\.erlang.cookie. + + Stopping the Broker + + Use rabbitmqctl stop. 
+ + Checking the Broker Status + + Use rabbitmqctl status. All rabbitmqctl commands will report the + node absence if no broker is running (i.e. nodedown). + + More info on rabbitmqctl commands + + More info on administration Logging @@ -264,113 +393,102 @@ retain a complete log history the startup scripts append the contents to the corresponding .1 files prior to starting the broker. - You can easily write your own log rotation script using the - rotate_logs_all command in rabbitmq-multi.bat or the rotate_logs command - in rabbitmqctl.bat if you want to perform log rotation selectively. + You can rotate logs using rabbitmqctl rotate_logs or rabbitmq-multi + rotate_logs. Debian GNU/Linux and Ubuntu Linux - Installation Instructions + Install the Server - * Install the rabbitmq-server package from your chosen distribution - at - the time of writing, RabbitMQ is included in Debian unstable (sid), - Debian testing (squeeze), Ubuntu 9.04 (Jaunty Jackalope), and the - upcoming Ubuntu 9.10 (Karmic Koala). - * Alternatively, install rabbitmq-server_version_all.deb, which you can - download by hand, or by using our APT repository. The server should - start with appropriate defaults. Note that the package depends on - erlang-base packages, so if you use apt on a Debian distribution that - has erlang-base packages available, all dependencies should be - properly met. - * To adjust the settings used for starting the server, edit - /etc/rabbitmq/rabbitmq.conf, setting environment variables as - described in the Generic Unix section below. You can additionally set - NODE_COUNT in /etc/default/rabbitmq to define the number of nodes to - run on a single machine. - - Running the Server - - The server is started as a daemon by default when the RabbitMQ Server - package is installed. Start and stop the server as usual for Debian using - invoke-rc.d rabbitmq-server stop/start/etc. - - The server is set up to run as system user rabbitmq. This means that if - you change the location of the Mnesia database or logs as described below, - you must ensure the files are owned by this user. - - The startup script waits for a limited amount of time, 30 seconds by - default, for the broker nodes to start. When that time is exceeded a - TIMEOUT is reported. The cause of this could be a misconfiguration or - other error. However, it is also possible that the node simply is taking a - long time to start up, perhaps because the machine it is running on is - heavily loaded, or there are many persisted messages or durable queues and - exchanges that need to be recovered. For that reason the timeout can be - adjusted by setting MULTI_START_ARGS to "-maxwait timeout_in_seconds" in - /etc/rabbitmq/rabbitmq.conf. + Install from a package - Logging + Install the rabbitmq-server package - at the time of writing, + RabbitMQ is included in Debian unstable (sid), Debian testing + (squeeze), and Ubuntu 9.04 (Jaunty Jackalope) and onwards. - Output from the server is sent to a RABBITMQ_NODENAME.log file in the - RABBITMQ_LOG_BASE directory. Additional log data is written to - RABBITMQ_NODENAME-sasl.log. + Alternatively, install rabbitmq-server_2.3.1-1_all.deb from the + download page, or by using our APT repository. - The log files use logrotate program to do all the necessary rotation and - compression (when needed). The script runs weekly, but you can easily - change that. By default logrotate will handle files located in default - /var/log/rabbitmq directory. 
See /etc/logrotate.d/rabbitmq-server for - default logrotate configuration file. - - Configuration File - - The server configuration file (rabbitmq.config) should be created at - /etc/rabbitmq/rabbitmq.config. For details of the contents of this file, - see the Config File Contents section. + If you use apt on a Debian distribution that has erlang-base + packages available, all dependencies should be met automatically. -RPM based Linux (CentOS, Fedora, OpenSuse, RedHat, etc.) + Run RabbitMQ Server - Installation Instructions + Customise RabbitMQ Environment Variables - * The RabbitMQ server is included in Fedora, so for Fedora and RHEL - users we recommend to install the rabbitmq-server package via the - distribution's package manager. - * Alternatively, install rabbitmq-server-version-architecture.rpm (which - you can download by hand at our download page). Note that the package - depends on erlang package(s). We strongly recommend using your - distribution's packaged version of Erlang to run the server. See EPEL - if there is no official erlang package for your distribution. - * To adjust the settings used for starting the server, edit - /etc/rabbitmq/rabbitmq.conf, setting environment variables as - described in the Generic Unix section below. You can additionally set - NODE_COUNT in /etc/sysconfig/rabbitmq to define the number of nodes to - run on a single machine. - - Running the Server - - The server is not started as a daemon by default when the RabbitMQ Server - package is installed. Start and stop the server as usual using - /sbin/service rabbitmq-server stop/start/etc. To start the daemon by - default when the system boots, run chkconfig rabbitmq-server on. - - The server is set up to run as system user rabbitmq. This means that if - you change the location of the Mnesia database or logs as described below, - you must ensure the files are owned by this user. - - The startup script waits for a limited amount of time, 30 seconds by - default, for the broker nodes to start. When that time is exceeded a - TIMEOUT is reported. The cause of this could be a misconfiguration or - other error. However, it is also possible that the node simply is taking a - long time to start up, perhaps because the machine it is running on is - heavily loaded, or there are many persisted messages or durable queues and - exchanges that need to be recovered. For that reason the timeout can be - adjusted by setting MULTI_START_ARGS to "-maxwait timeout_in_seconds" in - /etc/rabbitmq/rabbitmq.conf. + The server should start using defaults. See the Generic Unix + installation section below for how to set environment variables. - Logging + You can additionally set NODE_COUNT in /etc/default/rabbitmq to + define the number of nodes to run on a single machine. + + Start the Server + + The server is started as a daemon by default when the RabbitMQ + server package is installed. + + As an administrator, start and stop the server as usual for Debian + using invoke-rc.d rabbitmq-server stop/start/etc. + + Note: The server is set up to run as system user rabbitmq. If you + change the location of the Mnesia database or the logs, you must + ensure the files are owned by this user (and also update the + environment variables). + + Manage the Server + See the generic Unix section for info on rabbitmqctl commands and + logging, and to configure components. 
+ +RPM based Linux (CentOS, Fedora, OpenSuse, RedHat) + + Install the Server - Logging uses logrotate script in the same way as for Debian GNU + Install from a package + + The RabbitMQ server is included in Fedora, so for Fedora and RHEL + users we recommend to install the rabbitmq-server package via the + distribution's package manager. + + Alternatively, install rabbitmq-server-2.3.1-architecture.rpm + (which you can download from the download page). We strongly + recommend using your distribution's packaged version of Erlang to + run the server. See EPEL if there is no official Erlang package + for your distribution. + + Run RabbitMQ Server + + Customise RabbitMQ Environment Variables + + The server should start using defaults. See the Generic Unix + installation section below for how to set environment variables. + + You can additionally set NODE_COUNT in /etc/sysconfig/rabbitmq to + define the number of nodes to run on a single machine. + + Start the Server + + The server is not started as a daemon by default when the RabbitMQ + server package is installed. To start the daemon by default when + the system boots, as an administrator run chkconfig + rabbitmq-server on. + + As an administrator, start and stop the server as usual using + /sbin/service rabbitmq-server stop/start/etc. + + Note: The server is set up to run as system user rabbitmq. If you + change the location of the Mnesia database or the logs, you must + ensure the files are owned by this user (and also update the + environment variables). + + Manage the Server + See the generic Unix section for info on rabbitmqctl commands and + logging, and to configure components. Mac OS X with MacPorts + Users of Mac OS X who do not use MacPorts can use the Generic Unix + installation instructions instead. + A portfile for the RabbitMQ server is included in MacPorts. From the MacPorts home page: @@ -379,90 +497,170 @@ command-line, X11 or Aqua based open-source software on the Mac OS X operating system. - Users of Mac OS X who do not use MacPorts can use the Generic Unix - installation instructions below. - - The officially released MacPorts packaging may not always contain the - lastest released version of RabbitMQ. If you wish to install the latest - version via MacPorts, we maintain a dedicated MacPorts repository for this - purpose. + RabbitMQ maintains a dedicated MacPorts repository, which you should use + to install the latest RabbitMQ server version via MacPorts, as officially + released MacPorts packaging may not always contain the lastest released + version of RabbitMQ. + (NB It can take a very long time to install other macport updates - see + man port for options). - Installation Instructions + Install the Server Installation of the version of the RabbitMQ server distributed with MacPorts is as simple as (sudo port sync followed by) sudo port install rabbitmq-server - Running the Server + Run RabbitMQ Server To start the server, you can either use launchctl (see the instructions emitted during the sudo port install step) or you can start it from the - command line with sudo rabbitmq-server. + command line with sudo rabbitmq-server -detached. The rabbitmq-server, rabbitmqctl, and other RabbitMQ-related scripts on the path automatically (through sudo) run in the context of the rabbitmq - user so that the Erlang cookie can be read from + user, so that the Erlang cookie can be read from (sudo) $MACPORTS_PREFIX/var/lib/rabbitmq/.erlang.cookie. -Generic Unix or Linux (BSD, Mac OS X, etc.) 
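        As an illustration, the full MacPorts install-and-start sequence
        might look like this (a minimal sketch, assuming the dedicated
        MacPorts repository mentioned above is already configured):

            sudo port sync
            sudo port install rabbitmq-server
            sudo rabbitmq-server -detached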
+ See the General Linux installation section for further information. + +Mac OS X with Homebrew + + A brew for the RabbitMQ server is available from Homebrew. Note: You may + not be able to install the RabbitMQ brew from inside a firewall. + + Install the Server + + Before installing make sure you have the latest brews: + + brew update + + Then, install RabbitMQ server with: + + brew install rabbitmq + + Run RabbitMQ Server + + The RabbitMQ server scripts are installed into /usr/local/sbin. This is + not automatically added to your path, so you may wish to add + PATH=$PATH:/usr/local/sbin to your .bash_profile or .profile. The server + can then be started with rabbitmq-server. + + All scripts run under your own user account. Sudo is not required. + +Solaris Installation Instructions - * Install a recent version of Erlang. - * Download rabbitmq-server-generic-unix-version.tar.gz, where version is - the version of RabbitMQ Server you want to install. - * Contained in the tarball is a directory named rabbitmq_server-version. - You should extract this into somewhere appropriate for application - binaries on your system. - * Within the sbin directory is a shell script rabbitmq-server which may - need customisation for your installation. Environment variables - available for configuration are - - RABBITMQ_MNESIA_BASE - Defaults to /var/lib/rabbitmq/mnesia. Set this to the - directory where Mnesia database files should be placed. - - RABBITMQ_LOG_BASE - Defaults to /var/log/rabbitmq. Log files generated by the - server will be placed in this directory. - - RABBITMQ_NODENAME - Defaults to rabbit. This can be useful if you want to run - more than one node per machine - RABBITMQ_NODENAME should - be unique per erlang-node-and-machine combination. See - clustering on a single machine for more. - - RABBITMQ_NODE_IP_ADDRESS - Defaults to 0.0.0.0. This can be changed if you only want - to bind to one network interface. - - RABBITMQ_NODE_PORT - Defaults to 5672. - - RABBITMQ_CLUSTER_CONFIG_FILE - Defaults to /etc/rabbitmq/rabbitmq_cluster.config. If - this file is present it is used by the server to - auto-configure a RabbitMQ cluster. See the clustering - guide for details. - - RABBITMQ_CONFIG_FILE - Defaults to /etc/rabbitmq/rabbitmq. If this file is - present it is used by the server to configure RabbitMQ - application components. Note that the .config extension - is automatically appended by the Erlang runtime. - - Running the Server - - Invoke the rabbitmq-server shell script. This displays a banner message, - and reports on progress in the startup sequence, concluding with the - message "broker running", indicating that the RabbitMQ broker has been - started successfully. To shut down the server just terminate the process. - Alternatively, use the rabbitmqctl stop command. - - You can also start the server in "detached" mode with rabbitmq-server - -detached, in which case the server process is backgrounded immediately - and nothing is written to the console. + The Generic Unix Installation Instructions may be used to install RabbitMQ + on Solaris, with two modifications: + + * The RabbitMQ shell scripts assume a standard POSIX environment. On + Solaris this requires that startup scripts be executed with the + /usr/xpg4/bin/sh shell. This can be accomplished by replacing the + first line of each script (which normally reads #!/bin/sh) with + #!/bin/xpg4/bin/sh. + * The RabbitMQ shell scripts assume the existence of the "readlink" + utility for resolving symbolic links. 
This can be obtained by + installing the Sunfreeware "coreutils" package or compiling GNU + coreutils. + +Generic Unix or Linux (BSD, Mac OS X) + + Install the Server + + Install a recent version of Erlang. + + Download rabbitmq-server-generic-unix-2.3.1.tar.gz from the download page. + + Contained in the tarball is a directory named rabbitmq_server-2.3.1. You + should extract this into somewhere appropriate for application binaries on + your system. The sbin directory will be found in this directory. + + Run RabbitMQ Server + + Customise RabbitMQ Environment Variables + + Within the sbin directory is a shell script rabbitmq-server which + may need customisation for your installation. Environment + variables available for configuration are: + + RABBITMQ_MNESIA_BASE + Defaults to /var/lib/rabbitmq/mnesia. Set this to the directory + where Mnesia database files should be placed. + + RABBITMQ_LOG_BASE + Defaults to /var/log/rabbitmq. Log files generated by the server + will be placed in this directory. + + RABBITMQ_NODENAME + Defaults to rabbit. This can be useful if you want to run more + than one node per machine - RABBITMQ_NODENAME should be unique per + erlang-node-and-machine combination. See clustering on a single + machine for more. + + RABBITMQ_NODE_IP_ADDRESS + Defaults to the empty string - meaning bind to all network + interfaces. This can be changed if you only want to bind to one + network interface. + + RABBITMQ_NODE_PORT + Defaults to 5672. + + RABBITMQ_CONFIG_FILE + Defaults to /etc/rabbitmq/rabbitmq. If this file is present it is + used by the server to configure RabbitMQ components. The .config + extension is automatically appended by the Erlang runtime. See the + section on the configuration file for details. This file is also + used to auto-configure RabbitMQ clusters. See the clustering guide + for details. + + Note: If the directories RABBITMQ_MNESIA_BASE and RABBITMQ_LOG_BASE do not + exist the server will attempt to create them. It is important that the + server has write permission at these locations. + + Customise RabbitMQ Environment using the rabbitmq.conf file + + Alternatively, you can create/edit /etc/rabbitmq/rabbitmq.conf to + edit environment variables. Its location is not configurable + (unlike rabbitmq.config, which is by default in the same + directory). + + Use the same environment variable names as above (but drop the + RABBITMQ_ prefix) e.g. + + #example rabbitmq.conf file entries + #Rename the node + NODENAME=bunny + #Timeout in seconds + MULTI_START_ARGS="-maxwait 40" + + More info on using rabbitmq.conf + + Start the Server + + Invoke the rabbitmq-server shell script. This displays a banner + message, and reports on progress in the startup sequence, + concluding with the message "broker running", indicating that the + RabbitMQ broker has been started successfully. + + You can also start the server in "detached" mode with + rabbitmq-server -detached, in which case the server process runs + in the background. + + Manage the Server + + To stop the server or check its status, etc., you can use rabbitmqctl (as + an administrator). It should be available on the path. All rabbitmqctl + commands will report the node absence if no broker is running. + + * Use rabbitmqctl stop to stop the server. Alternatively, just terminate + the Erlang process. + * Use rabbitmqctl status to check whether it is running. + + More info on rabbitmqctl commands + + More info on administration Logging @@ -470,63 +668,156 @@ RABBITMQ_LOG_BASE directory. 
Additional log data is written to RABBITMQ_NODENAME-sasl.log. - In order to retain a complete log history the startup scripts, by default, - append the contents to the corresponding .1 files prior to starting the - broker. You can easily set up log rotation process by creating logrotate - script as it is done for Debian and RPM based distributions. + These log files are created afresh every time the broker starts. In order + to retain a complete log history the startup scripts append the contents + to the corresponding .1 files prior to starting the broker. + + You can use the logrotate program to do all necessary rotation and + compression, and you can change it. By default, this script runs weekly on + files located in default /var/log/rabbitmq directory. See + /etc/logrotate.d/rabbitmq-server to configure logrotate. Troubleshooting - If the server fails to start, examine the console output and the log files - in the RABBITMQ_LOG_BASE directory for clues. Chances are there is a - configuration error, e.g. the mnesia directory cannot be created or the - TCP port on which RabbitMQ tries to listen is already taken. - - When the server fails to start, usually a crash dump file erl_crash.dump - is created in the directory where the server was started. This can provide - very detailed information on the causes of a start up failure, but its - analysis requires Erlang expertise. + Checking Broker Status + + You can use rabbitmqctl status to verify whether a broker is + running. + Normal output from a running broker without plugins follows this + pattern: + + Status of node 'rabbit@xxx' ... + [{running_applications,[{rabbit,"RabbitMQ","2.3.1"}, + {os_mon,"..."}, + {sasl,"..."}, + {mnesia,"..."}, + {stdlib,"..."}, + {kernel,"..."}, + {nodes,[{disc,['rabbit@xxx']}]} + {running_nodes,['rabbit@xxx']}] + + This example indicates that no broker is running: + + Status of node 'rabbit@xxx' ... + Error: unable to connect to node 'rabbit@xxx': nodedown + diagnostics: + - nodes and their ports on xxx: [{rabbitmqctl,...}] + - current node: 'rabbitmqctlxxx@xxx' + - current node home dir: [...] + - current node cookie hash: [...] + + If the diagnostic line looks like this: + + - nodes and their ports on xxx: [{rabbit,...},{rabbitmqctl,...}] + + and the broker logfile contains entries similar to + + Connection attempt from disallowed node... + + then you should make sure the erlang cookies are the same. + + Server Fails to Start + + When the server fails to start, usually a crash dump file + erl_crash.dump is created in the directory where the server was + started. This can provide very detailed information on the causes + of a start up failure, but its analysis requires Erlang expertise. + + If you attempt to start another server while a broker is already + running, then you will receive an error report. You can confirm + whether the broker is already running by checking the status. + + If the server fails to start, examine the console output and the + log files in the RABBITMQ_LOG_BASE directory. Configuration and + permission errors are frequently the cause, e.g. the Mnesia + directory cannot be created. + + Server Timeout on Start + + By default, the startup script waits 30 seconds for the broker + nodes to start. When that time is exceeded a TIMEOUT is reported. + The cause of this could be a misconfiguration or other error. 
+ + It is possible that the node is legitimately taking a long time to + start up (perhaps because the machine it is running on is heavily + loaded, or because many persisted messages, durable queues or + exchanges need to be recovered). In this case, the timeout can be + adjusted by setting the environment variable MULTI_START_ARGS to + "-maxwait timeout_in_seconds" (e.g. using rabbitmq.conf). + + Windows Service fails to Install + If the service fails to install, check the service account has + full access permission for RABBITMQ_BASE, RABBITMQ_MNESIA_BASE and + RABBITMQ_LOG_BASE directories [XP, Vista]. + + Windows Service fails to Start + + If the service fails to start, make sure the service has been + installed. + + On starting the service, if the service output reads "The process + terminated unexpectedly" instead, then the service did not start + correctly. Check that the environment variables are set correctly. + The logfiles in RABBITMQ_BASE may also contain useful diagnostic + information. If the server is not behaving as expected during operation, examine the - log files for clues and use the rabbitmqctl commands from the admin guide - to obtain further information on the server status. + log files and use the rabbitmqctl commands from the admin guide to obtain + further information on the server status. For problems encountered in the handling of AMQP traffic, the AMQP capture and analysis tool may help in the analysis. - If all of the above fails, report the problem to support@rabbitmq.com and - include the log files under RABBITMQ_LOG_BASE in your report. + Failing that, it's possible that we've solved the problem for someone + else. Try using the search box at the top of our web pages to find site, + mailing list and blog information. You might also check our mailing list + archives. + + If you still can't find a solution to your problem then please post a new + message to rabbitmq-discuss@lists.rabbitmq.com (you may have to join the + mailing list first). Let us know what you were trying to do, the error you + received and relevant entries from the logfile and one of our engineers + will help you get it fixed. + + If all of the above fails, please tell us about the problem, including the + log files under RABBITMQ_LOG_BASE in your report. Configuration File - The Rabbit configuration file allows for the rabbit core application, - Erlang services and Rabbit plugins to be configured. This configuration - file is a standard Erlang configuration file, as documented on the Erlang - Config Man Page. + The configuration file rabbitmq.config allows the RabbitMQ core + application, Erlang services and RabbitMQ plugins to be configured. It is + a standard Erlang configuration file, documented on the Erlang Config Man + Page. - The location of this configuration file is distribution specific. By + The location of this configuration file is distribution-specific. By default, it is located in the following places on each platform: - * Windows - %APPDATA%\RabbitMQ\rabbitmq.config - * Debian - /etc/rabbitmq/rabbitmq.config - * RPM - /etc/rabbitmq/rabbitmq.config - * MacOS (Macports) - /opt/local/etc/rabbitmq/rabbitmq.config - * Generic UNIX - /etc/rabbitmq/rabbitmq.config - - Note that this file may not be created by default. If needed, it can be - created manually. The location can also be overidden via the - RABBITMQ_CONFIG_FILE environment variable. Note that the property value - should not include the .config extension - this is automatically appended - by the Erlang runtime. 
+ * Windows - %APPDATA%\RabbitMQ\rabbitmq.config + * Debian - /etc/rabbitmq/rabbitmq.config + * RPM - /etc/rabbitmq/rabbitmq.config + * MacOS (Macports) - /opt/local/etc/rabbitmq/rabbitmq.config + * Generic UNIX - /etc/rabbitmq/rabbitmq.config + + If rabbitmq.config doesn't exist, it can be created manually. The location + can be changed via the RABBITMQ_CONFIG_FILE environment variable. The + Erlang runtime automatically appends the .config extension to the value of + this variable. Restart the server after the change, or remove and install + the RabbitMQ service when running as a Windows Service. - An example configuration file would look like: + An example configuration file follows: [ {mnesia, [{dump_log_write_threshold, 1000}]}, {rabbit, []} ]. - This configuration file will alter the dump_log_write_threshold for mnesia + This example will alter the dump_log_write_threshold for mnesia (increasing from the default of 100). - About us RabbitMQ™ is a Trademark of Rabbit Technologies Ltd. + This configuration file is not the same as rabbitmq.conf, which can be + used to set environment variables. A future release of RabbitMQ will unify + the two configuration mechanisms. + + Contact | About + + Copyright © 2011 VMware, Inc. All rights reserved. diff -Nru rabbitmq-server-1.7.2/LICENSE rabbitmq-server-2.3.1/LICENSE --- rabbitmq-server-1.7.2/LICENSE 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/LICENSE 2011-02-03 12:47:35.000000000 +0000 @@ -1,5 +1,5 @@ This package, the RabbitMQ server is licensed under the MPL. For the MPL, please see LICENSE-MPL-RabbitMQ. -If you have any questions regarding licensing, please contact us at +If you have any questions regarding licensing, please contact us at info@rabbitmq.com. diff -Nru rabbitmq-server-1.7.2/LICENSE-MPL-RabbitMQ rabbitmq-server-2.3.1/LICENSE-MPL-RabbitMQ --- rabbitmq-server-1.7.2/LICENSE-MPL-RabbitMQ 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/LICENSE-MPL-RabbitMQ 2011-02-03 12:47:35.000000000 +0000 @@ -446,28 +446,10 @@ The Original Code is RabbitMQ. - The Initial Developers of the Original Code are LShift Ltd, - Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. - - Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, - Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd - are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial - Technologies LLC, and Rabbit Technologies Ltd. - - Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift - Ltd. Portions created by Cohesive Financial Technologies LLC are - Copyright (C) 2007-2010 Cohesive Financial Technologies - LLC. Portions created by Rabbit Technologies Ltd are Copyright - (C) 2007-2010 Rabbit Technologies Ltd. - - All Rights Reserved. - - Contributor(s): ______________________________________.'' + The Initial Developer of the Original Code is VMware, Inc. + Copyright (c) 2007-2011 VMware, Inc. All rights reserved.'' [NOTE: The text of this Exhibit A may differ slightly from the text of the notices in the Source Code files of the Original Code. You should use the text of this Exhibit A rather than the text found in the Original Code Source Code for Your Modifications.] 
- - - diff -Nru rabbitmq-server-1.7.2/Makefile rabbitmq-server-2.3.1/Makefile --- rabbitmq-server-1.7.2/Makefile 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/Makefile 2011-02-03 12:47:36.000000000 +0000 @@ -1,21 +1,25 @@ - TMPDIR ?= /tmp RABBITMQ_NODENAME ?= rabbit RABBITMQ_SERVER_START_ARGS ?= RABBITMQ_MNESIA_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-mnesia +RABBITMQ_PLUGINS_EXPAND_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-plugins-scratch RABBITMQ_LOG_BASE ?= $(TMPDIR) DEPS_FILE=deps.mk SOURCE_DIR=src EBIN_DIR=ebin INCLUDE_DIR=include +DOCS_DIR=docs INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl) $(INCLUDE_DIR)/rabbit_framing.hrl -SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing.erl +SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl $(USAGES_ERL) BEAM_TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES)) TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) -WEB_URL=http://stage.rabbitmq.com/ -MANPAGES=$(patsubst %.pod, %.gz, $(wildcard docs/*.[0-9].pod)) +WEB_URL=http://www.rabbitmq.com/ +MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml)) +WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml) +USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-multi.1.xml +USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML))) ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes) PYTHON=python @@ -36,11 +40,11 @@ RABBIT_PLT=rabbit.plt ifndef USE_SPECS -# our type specs rely on features / bug fixes in dialyzer that are -# only available in R13B01 upwards (R13B01 is eshell 5.7.2) +# our type specs rely on features and bug fixes in dialyzer that are +# only available in R14A upwards (R13B04 is erts 5.7.5) # # NB: the test assumes that version number will only contain single digits -USE_SPECS=$(shell if [ $$(erl -noshell -eval 'io:format(erlang:system_info(version)), halt().') \> "5.7.1" ]; then echo "true"; else echo "false"; fi) +USE_SPECS=$(shell if [ $$(erl -noshell -eval 'io:format(erlang:system_info(version)), halt().') \> "5.7.5" ]; then echo "true"; else echo "false"; fi) endif #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests @@ -52,55 +56,85 @@ SIBLING_CODEGEN_DIR=../rabbitmq-codegen/ AMQP_CODEGEN_DIR=$(shell [ -d $(SIBLING_CODEGEN_DIR) ] && echo $(SIBLING_CODEGEN_DIR) || echo codegen) -AMQP_SPEC_JSON_PATH=$(AMQP_CODEGEN_DIR)/amqp-0.8.json +AMQP_SPEC_JSON_FILES_0_9_1=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json +AMQP_SPEC_JSON_FILES_0_8=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.8.json ERL_CALL=erl_call -sname $(RABBITMQ_NODENAME) -e ERL_EBIN=erl -noinput -pa $(EBIN_DIR) +define usage_xml_to_erl + $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, $(SOURCE_DIR)/rabbit_%_usage.erl, $(subst -,_,$(1)))) +endef + +define usage_dep + $(call usage_xml_to_erl, $(1)): $(1) $(DOCS_DIR)/usage.xsl +endef + +ifneq "$(SBIN_DIR)" "" +ifneq "$(TARGET_DIR)" "" +SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR)) +endif +endif + +# Versions prior to this are not supported +NEED_MAKE := 3.80 +ifneq "$(NEED_MAKE)" "$(firstword $(sort $(NEED_MAKE) $(MAKE_VERSION)))" +$(error Versions of make prior to $(NEED_MAKE) are not supported) +endif + +# .DEFAULT_GOAL introduced in 3.81 +DEFAULT_GOAL_MAKE := 3.81 +ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort 
$(DEFAULT_GOAL_MAKE) $(MAKE_VERSION)))" +.DEFAULT_GOAL=all +endif + all: $(TARGETS) $(DEPS_FILE): $(SOURCES) $(INCLUDES) - escript generate_deps $(INCLUDE_DIR) $(SOURCE_DIR) \$$\(EBIN_DIR\) $@ + rm -f $@ + echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR) $(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(BEAM_TARGETS) generate_app escript generate_app $(EBIN_DIR) $@ < $< -$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl +$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE) erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $< -# ERLC_EMULATOR="erl -smp" erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $< -$(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_PATH) - $(PYTHON) codegen.py header $(AMQP_SPEC_JSON_PATH) $@ +$(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) + $(PYTHON) codegen.py --ignore-conflicts header $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@ + +$(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) + $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_9_1) $@ -$(SOURCE_DIR)/rabbit_framing.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_PATH) - $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_PATH) $@ +$(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_8) + $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_8) $@ dialyze: $(BEAM_TARGETS) $(BASIC_PLT) - $(ERL_EBIN) -eval \ - "rabbit_dialyzer:halt_with_code(rabbit_dialyzer:dialyze_files(\"$(BASIC_PLT)\", \"$(BEAM_TARGETS)\"))." + dialyzer --plt $(BASIC_PLT) --no_native \ + -Wrace_conditions $(BEAM_TARGETS) # rabbit.plt is used by rabbitmq-erlang-client's dialyze make target create-plt: $(RABBIT_PLT) $(RABBIT_PLT): $(BEAM_TARGETS) $(BASIC_PLT) - cp $(BASIC_PLT) $@ - $(ERL_EBIN) -eval \ - "rabbit_dialyzer:halt_with_code(rabbit_dialyzer:add_to_plt(\"$@\", \"$(BEAM_TARGETS)\"))." 
+ dialyzer --plt $(BASIC_PLT) --output_plt $@ --no_native \ + --add_to_plt $(BEAM_TARGETS) $(BASIC_PLT): $(BEAM_TARGETS) if [ -f $@ ]; then \ touch $@; \ else \ - $(ERL_EBIN) -eval \ - "rabbit_dialyzer:halt_with_code(rabbit_dialyzer:create_basic_plt(\"$@\"))."; \ + dialyzer --output_plt $@ --build_plt \ + --apps erts kernel stdlib compiler sasl os_mon mnesia tools \ + public_key crypto ssl; \ fi clean: rm -f $(EBIN_DIR)/*.beam rm -f $(EBIN_DIR)/rabbit.app $(EBIN_DIR)/rabbit.boot $(EBIN_DIR)/rabbit.script $(EBIN_DIR)/rabbit.rel - rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing.erl codegen.pyc - rm -f docs/*.[0-9].gz + rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing_amqp_*.erl codegen.pyc + rm -f $(DOCS_DIR)/*.[0-9].gz $(DOCS_DIR)/*.man.xml $(DOCS_DIR)/*.erl $(USAGES_ERL) rm -f $(RABBIT_PLT) rm -f $(DEPS_FILE) @@ -113,7 +147,8 @@ RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \ RABBITMQ_NODE_PORT="$(RABBITMQ_NODE_PORT)" \ RABBITMQ_LOG_BASE="$(RABBITMQ_LOG_BASE)" \ - RABBITMQ_MNESIA_DIR="$(RABBITMQ_MNESIA_DIR)" + RABBITMQ_MNESIA_DIR="$(RABBITMQ_MNESIA_DIR)" \ + RABBITMQ_PLUGINS_EXPAND_DIR="$(RABBITMQ_PLUGINS_EXPAND_DIR)" run: all $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ @@ -135,7 +170,7 @@ $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ RABBITMQ_NODE_ONLY=true \ RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS) -detached" \ - ./scripts/rabbitmq-server ; sleep 1 + ./scripts/rabbitmq-server; sleep 1 start-rabbit-on-node: all echo "rabbit:start()." | $(ERL_CALL) @@ -143,8 +178,13 @@ stop-rabbit-on-node: all echo "rabbit:stop()." | $(ERL_CALL) -force-snapshot: all - echo "rabbit_persister:force_snapshot()." | $(ERL_CALL) +set-memory-alarm: all + echo "alarm_handler:set_alarm({vm_memory_high_watermark, []})." | \ + $(ERL_CALL) + +clear-memory-alarm: all + echo "alarm_handler:clear_alarm(vm_memory_high_watermark)." | \ + $(ERL_CALL) stop-node: -$(ERL_CALL) -q @@ -153,7 +193,11 @@ COVER_DIR=. start-cover: all - echo "cover:start(), rabbit_misc:enable_cover([\"$(COVER_DIR)\"])." | $(ERL_CALL) + echo "rabbit_misc:start_cover([\"rabbit\", \"hare\"])." | $(ERL_CALL) + echo "rabbit_misc:enable_cover([\"$(COVER_DIR)\"])." | $(ERL_CALL) + +start-secondary-cover: all + echo "rabbit_misc:start_cover([\"hare\"])." | $(ERL_CALL) stop-cover: all echo "rabbit_misc:report_cover(), cover:stop()." | $(ERL_CALL) @@ -169,14 +213,14 @@ >> $(TARGET_SRC_DIR)/INSTALL cp README.in $(TARGET_SRC_DIR)/README elinks -dump -no-references -no-numbering $(WEB_URL)build-server.html \ - >> $(TARGET_SRC_DIR)/BUILD + >> $(TARGET_SRC_DIR)/README sed -i.save 's/%%VSN%%/$(VERSION)/' $(TARGET_SRC_DIR)/ebin/rabbit_app.in && rm -f $(TARGET_SRC_DIR)/ebin/rabbit_app.in.save cp -r $(AMQP_CODEGEN_DIR)/* $(TARGET_SRC_DIR)/codegen/ cp codegen.py Makefile generate_app generate_deps calculate-relative $(TARGET_SRC_DIR) cp -r scripts $(TARGET_SRC_DIR) - cp -r docs $(TARGET_SRC_DIR) + cp -r $(DOCS_DIR) $(TARGET_SRC_DIR) chmod 0755 $(TARGET_SRC_DIR)/scripts/* (cd dist; tar -zcf $(TARBALL_NAME).tar.gz $(TARBALL_NAME)) @@ -188,40 +232,86 @@ rm -rf dist find . 
-regex '.*\(~\|#\|\.swp\|\.dump\)' -exec rm {} \; -%.gz: %.pod - pod2man \ - -n `echo $$(basename $*) | sed -e 's/\.[[:digit:]]\+//'` \ - -s `echo $$(basename $*) | sed -e 's/.*\.\([^.]\+\)/\1/'` \ - -c "RabbitMQ AMQP Server" \ - -d "" \ - -r "" \ - $< | gzip --best > $@ - -docs_all: $(MANPAGES) - -install: SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR)) -install: all docs_all install_dirs - @[ -n "$(TARGET_DIR)" ] || (echo "Please set TARGET_DIR."; false) - @[ -n "$(SBIN_DIR)" ] || (echo "Please set SBIN_DIR."; false) - @[ -n "$(MAN_DIR)" ] || (echo "Please set MAN_DIR."; false) +# xmlto can not read from standard input, so we mess with a tmp file. +%.gz: %.xml $(DOCS_DIR)/examples-to-end.xsl + xmlto --version | grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || opt='--stringparam man.indent.verbatims=0' ; \ + xsltproc $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \ + xmlto -o $(DOCS_DIR) $$opt man $<.tmp && \ + gzip -f $(DOCS_DIR)/`basename $< .xml` + rm -f $<.tmp + +# Use tmp files rather than a pipeline so that we get meaningful errors +# Do not fold the cp into previous line, it's there to stop the file being +# generated but empty if we fail +$(SOURCE_DIR)/%_usage.erl: + xsltproc --stringparam modulename "`basename $@ .erl`" \ + $(DOCS_DIR)/usage.xsl $< > $@.tmp + sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $@.tmp > $@.tmp2 + fold -s $@.tmp2 > $@.tmp3 + mv $@.tmp3 $@ + rm $@.tmp $@.tmp2 + +# We rename the file before xmlto sees it since xmlto will use the name of +# the file to make internal links. +%.man.xml: %.xml $(DOCS_DIR)/html-to-website-xml.xsl + cp $< `basename $< .xml`.xml && \ + xmlto xhtml-nochunks `basename $< .xml`.xml ; rm `basename $< .xml`.xml + cat `basename $< .xml`.html | \ + xsltproc --novalid $(DOCS_DIR)/remove-namespaces.xsl - | \ + xsltproc --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \ + xmllint --format - > $@ + rm `basename $< .xml`.html + +docs_all: $(MANPAGES) $(WEB_MANPAGES) + +install: install_bin install_docs - mkdir -p $(TARGET_DIR) +install_bin: all install_dirs cp -r ebin include LICENSE LICENSE-MPL-RabbitMQ INSTALL $(TARGET_DIR) chmod 0755 scripts/* - for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-multi rabbitmq-activate-plugins rabbitmq-deactivate-plugins; do \ + for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-multi; do \ cp scripts/$$script $(TARGET_DIR)/sbin; \ [ -e $(SBIN_DIR)/$$script ] || ln -s $(SCRIPTS_REL_PATH)/$$script $(SBIN_DIR)/$$script; \ done + mkdir -p $(TARGET_DIR)/plugins + echo Put your .ez plugin files in this directory. > $(TARGET_DIR)/plugins/README + +install_docs: docs_all install_dirs for section in 1 5; do \ mkdir -p $(MAN_DIR)/man$$section; \ - for manpage in docs/*.$$section.pod; do \ - cp docs/`basename $$manpage .pod`.gz $(MAN_DIR)/man$$section; \ + for manpage in $(DOCS_DIR)/*.$$section.gz; do \ + cp $$manpage $(MAN_DIR)/man$$section; \ done; \ done install_dirs: - mkdir -p $(SBIN_DIR) + @ OK=true && \ + { [ -n "$(TARGET_DIR)" ] || { echo "Please set TARGET_DIR."; OK=false; }; } && \ + { [ -n "$(SBIN_DIR)" ] || { echo "Please set SBIN_DIR."; OK=false; }; } && \ + { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && $$OK + mkdir -p $(TARGET_DIR)/sbin + mkdir -p $(SBIN_DIR) + mkdir -p $(MAN_DIR) + +$(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML)))) + +# Note that all targets which depend on clean must have clean in their +# name. 
Also any target that doesn't depend on clean should not have +# clean in its name, unless you know that you don't need any of the +# automatic dependency generation for that target (eg cleandb). +# We want to load the dep file if *any* target *doesn't* contain +# "clean" - i.e. if removing all clean-like targets leaves something + +ifeq "$(MAKECMDGOALS)" "" +TESTABLEGOALS:=$(.DEFAULT_GOAL) +else +TESTABLEGOALS:=$(MAKECMDGOALS) +endif + +ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" "" -include $(DEPS_FILE) +endif + diff -Nru rabbitmq-server-1.7.2/README rabbitmq-server-2.3.1/README --- rabbitmq-server-1.7.2/README 2010-02-15 16:01:08.000000000 +0000 +++ rabbitmq-server-2.3.1/README 2011-02-03 12:47:36.000000000 +0000 @@ -8,3 +8,90 @@ =========================================================================== + RabbitMQ + + SpringSource + __________________________ [ ] + + * Download + * Documentation + * Get Started + * Services + * Community + * Blog + + This section describes the process for obtaining a copy of the RabbitMQ + server source code, as well as instructions for building the server from + source. + + Table of Contents + + * Obtaining the source + * Required Libraries and Tools + * Building the server + +Obtaining the source + + * Either download a released source code distribution from the download + page, or + * Check the code out directly from our mercurial repositories: + + $ hg clone http://hg.rabbitmq.com/rabbitmq-codegen + $ hg clone http://hg.rabbitmq.com/rabbitmq-server + $ cd rabbitmq-server + $ make + + If you choose to check the code out using mercurial, be aware that the + code-generation module is a dependency of the server library. If you're + working with a released source code distribution, though, the + code-generation module is included. + +Required Libraries and Tools + + In order to build RabbitMQ, you will need a few tools. + + The server requires a recent version of Python and simplejson.py (an + implementation of a JSON reader and writer in Python), for generating AMQP + framing code. simplejson.py is included as a standard json library in the + Python core since 2.6 release. + + Additionally, for building the server, you will need + + * the Erlang development and runtime tools + If you are on a Debian-based system then you need the following + packages installed as a minimum: erlang-dev, erlang-base or + erlang-base-hipe, erlang-ssl, erlang-os-mon, erlang-mnesia (on some + older distributions the last three are bundled into erlang-nox). If + you are building and installing Erlang from source then you must + ensure that openssl is installed on your system. + * a recent version of GNU make + * a recent version of xsltproc, which is part of libxslt + +Building the server + + Change to the rabbitmq-server directory, and type make. + + Other interesting Makefile targets include + + all + The default target. Builds the server. + + run + Builds the server and starts an instance with an interactive + Erlang shell. This will by default create a Mnesia database in + /tmp/rabbit-mnesia, but this location can be overridden by setting + the Makefile variable MNESIA_DIR: + + make run MNESIA_DIR=/some/other/location/for/rabbit-mnesia + + clean + Removes build products and wipes the Mnesia database directory + used by the run target. See the above description of MNESIA_DIR. + + srcdist + Runs the "clean" target, and constructs a source-code distribution + tarball in ./dist. + + Contact | About + + Copyright © 2011 VMware, Inc. 
All rights reserved. diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-activate-plugins rabbitmq-server-2.3.1/scripts/rabbitmq-activate-plugins --- rabbitmq-server-1.7.2/scripts/rabbitmq-activate-plugins 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-activate-plugins 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -#!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ -## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. -## -## The Original Code is RabbitMQ. -## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## - -. `dirname $0`/rabbitmq-env - -RABBITMQ_EBIN=${RABBITMQ_HOME}/ebin -[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR="${RABBITMQ_HOME}/plugins" -[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR="${RABBITMQ_HOME}/priv/plugins" - -exec erl \ - -pa "$RABBITMQ_EBIN" \ - -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \ - -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \ - -rabbit rabbit_ebin "\"$RABBITMQ_EBIN\"" \ - -noinput \ - -hidden \ - -s rabbit_plugin_activator \ - -extra "$@" diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-activate-plugins.bat rabbitmq-server-2.3.1/scripts/rabbitmq-activate-plugins.bat --- rabbitmq-server-1.7.2/scripts/rabbitmq-activate-plugins.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-activate-plugins.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -@echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ -REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. -REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. 
-REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. -REM - -setlocal - -rem Preserve values that might contain exclamation marks before -rem enabling delayed expansion -set TDP0=%~dp0 -set STAR=%* -setlocal enabledelayedexpansion - -if not exist "!ERLANG_HOME!\bin\erl.exe" ( - echo. - echo ****************************** - echo ERLANG_HOME not set correctly. - echo ****************************** - echo. - echo Please either set ERLANG_HOME to point to your Erlang installation or place the - echo RabbitMQ server distribution in the Erlang lib folder. - echo. - exit /B -) - -set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins -set RABBITMQ_PLUGINS_EXPAND_DIR=!TDP0!..\priv\plugins -set RABBITMQ_EBIN_DIR=!TDP0!..\ebin - -"!ERLANG_HOME!\bin\erl.exe" ^ --pa "!RABBITMQ_EBIN_DIR!" ^ --noinput -hidden ^ --s rabbit_plugin_activator ^ --rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^ --rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^ --rabbit rabbit_ebin \""!RABBITMQ_EBIN_DIR:\=/!"\" ^ --extra !STAR! - -endlocal -endlocal diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmqctl rabbitmq-server-2.3.1/scripts/rabbitmqctl --- rabbitmq-server-1.7.2/scripts/rabbitmqctl 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmqctl 2011-02-03 12:47:36.000000000 +0000 @@ -1,36 +1,19 @@ #!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## The Original Code is RabbitMQ. ## -## The Original Code is RabbitMQ. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. 
Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## - -NODENAME=rabbit . `dirname $0`/rabbitmq-env @@ -46,4 +29,3 @@ -s rabbit_control \ -nodename $RABBITMQ_NODENAME \ -extra "$@" - diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmqctl.bat rabbitmq-server-2.3.1/scripts/rabbitmqctl.bat --- rabbitmq-server-1.7.2/scripts/rabbitmqctl.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmqctl.bat 2011-02-03 12:47:36.000000000 +0000 @@ -1,33 +1,18 @@ @echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ +REM The contents of this file are subject to the Mozilla Public License +REM Version 1.1 (the "License"); you may not use this file except in +REM compliance with the License. You may obtain a copy of the License +REM at http://www.mozilla.org/MPL/ +REM +REM Software distributed under the License is distributed on an "AS IS" +REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +REM the License for the specific language governing rights and +REM limitations under the License. REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. +REM The Original Code is RabbitMQ. REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. +REM The Initial Developer of the Original Code is VMware, Inc. +REM Copyright (c) 2007-2011 VMware, Inc. All rights reserved. REM setlocal @@ -38,8 +23,12 @@ set STAR=%* setlocal enabledelayedexpansion +if "!COMPUTERNAME!"=="" ( + set COMPUTERNAME=localhost +) + if "!RABBITMQ_NODENAME!"=="" ( - set RABBITMQ_NODENAME=rabbit + set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME! ) if not exist "!ERLANG_HOME!\bin\erl.exe" ( @@ -54,7 +43,7 @@ exit /B ) -"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden !RABBITMQ_CTL_ERL_ARGS! -sname rabbitmqctl -s rabbit_control -nodename !RABBITMQ_NODENAME! -extra !STAR! +"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden !RABBITMQ_CTL_ERL_ARGS! -sname rabbitmqctl!RANDOM! -s rabbit_control -nodename !RABBITMQ_NODENAME! -extra !STAR! 
endlocal endlocal diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-deactivate-plugins rabbitmq-server-2.3.1/scripts/rabbitmq-deactivate-plugins --- rabbitmq-server-1.7.2/scripts/rabbitmq-deactivate-plugins 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-deactivate-plugins 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -#!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ -## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. -## -## The Original Code is RabbitMQ. -## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## - -. `dirname $0`/rabbitmq-env - -RABBITMQ_EBIN=${RABBITMQ_HOME}/ebin - -rm -f ${RABBITMQ_EBIN}/rabbit.rel ${RABBITMQ_EBIN}/rabbit.script ${RABBITMQ_EBIN}/rabbit.boot diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-deactivate-plugins.bat rabbitmq-server-2.3.1/scripts/rabbitmq-deactivate-plugins.bat --- rabbitmq-server-1.7.2/scripts/rabbitmq-deactivate-plugins.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-deactivate-plugins.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -@echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ -REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. -REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. 
-REM -REM Contributor(s): ______________________________________. -REM - -setlocal - -rem Preserve values that might contain exclamation marks before -rem enabling delayed expansion -set TDP0=%~dp0 -setlocal enabledelayedexpansion - -set RABBITMQ_EBIN_DIR=!TDP0!..\ebin - -del /f "!RABBITMQ_EBIN_DIR!"\rabbit.rel "!RABBITMQ_EBIN_DIR!"\rabbit.script "!RABBITMQ_EBIN_DIR!"\rabbit.boot - -endlocal -endlocal diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-env rabbitmq-server-2.3.1/scripts/rabbitmq-env --- rabbitmq-server-1.7.2/scripts/rabbitmq-env 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-env 2011-02-03 12:47:36.000000000 +0000 @@ -1,33 +1,18 @@ #!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## The Original Code is RabbitMQ. ## -## The Original Code is RabbitMQ. -## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## # Determine where this script is really located @@ -48,6 +33,8 @@ SCRIPT_DIR=`dirname $SCRIPT_PATH` RABBITMQ_HOME="${SCRIPT_DIR}/.." +[ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname` +NODENAME=rabbit@${HOSTNAME%%.*} # Load configuration from the rabbitmq.conf file [ -f /etc/rabbitmq/rabbitmq.conf ] && . /etc/rabbitmq/rabbitmq.conf diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-multi rabbitmq-server-2.3.1/scripts/rabbitmq-multi --- rabbitmq-server-1.7.2/scripts/rabbitmq-multi 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-multi 2011-02-03 12:47:36.000000000 +0000 @@ -1,35 +1,20 @@ #!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. 
You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## The Original Code is RabbitMQ. ## -## The Original Code is RabbitMQ. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. All rights reserved. ## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## -NODENAME=rabbit + SCRIPT_HOME=$(dirname $0) PIDS_FILE=/var/lib/rabbitmq/pids MULTI_ERL_ARGS= diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-multi.bat rabbitmq-server-2.3.1/scripts/rabbitmq-multi.bat --- rabbitmq-server-1.7.2/scripts/rabbitmq-multi.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-multi.bat 2011-02-03 12:47:36.000000000 +0000 @@ -1,33 +1,18 @@ @echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ +REM The contents of this file are subject to the Mozilla Public License +REM Version 1.1 (the "License"); you may not use this file except in +REM compliance with the License. You may obtain a copy of the License +REM at http://www.mozilla.org/MPL/ +REM +REM Software distributed under the License is distributed on an "AS IS" +REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +REM the License for the specific language governing rights and +REM limitations under the License. REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. +REM The Original Code is RabbitMQ. REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. +REM The Initial Developer of the Original Code is VMware, Inc. +REM Copyright (c) 2007-2011 VMware, Inc. All rights reserved. REM setlocal @@ -42,8 +27,12 @@ set RABBITMQ_BASE=!APPDATA!\RabbitMQ ) +if "!COMPUTERNAME!"=="" ( + set COMPUTERNAME=localhost +) + if "!RABBITMQ_NODENAME!"=="" ( - set RABBITMQ_NODENAME=rabbit + set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME! ) if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( @@ -85,7 +74,7 @@ -pa "!TDP0!..\ebin" ^ -noinput -hidden ^ !RABBITMQ_MULTI_ERL_ARGS! ^ --sname rabbitmq_multi ^ +-sname rabbitmq_multi!RANDOM! ^ !RABBITMQ_CONFIG_ARG! ^ -s rabbit_multi ^ !RABBITMQ_MULTI_START_ARGS! ^ diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-server rabbitmq-server-2.3.1/scripts/rabbitmq-server --- rabbitmq-server-1.7.2/scripts/rabbitmq-server 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-server 2011-02-03 12:47:36.000000000 +0000 @@ -1,40 +1,23 @@ #!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ ## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. ## -## The Original Code is RabbitMQ. +## The Original Code is RabbitMQ. ## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2011 VMware, Inc. 
All rights reserved. ## -NODENAME=rabbit -SERVER_ERL_ARGS="+K true +A30 \ +SERVER_ERL_ARGS="+K true +A30 +P 1048576 \ -kernel inet_default_listen_options [{nodelay,true}] \ -kernel inet_default_connect_options [{nodelay,true}]" -CLUSTER_CONFIG_FILE=/etc/rabbitmq/rabbitmq_cluster.config CONFIG_FILE=/etc/rabbitmq/rabbitmq LOG_BASE=/var/log/rabbitmq MNESIA_BASE=/var/lib/rabbitmq/mnesia @@ -42,7 +25,7 @@ . `dirname $0`/rabbitmq-env -DEFAULT_NODE_IP_ADDRESS=0.0.0.0 +DEFAULT_NODE_IP_ADDRESS=auto DEFAULT_NODE_PORT=5672 [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS} [ "x" = "x$RABBITMQ_NODE_PORT" ] && [ "x" != "x$NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT} @@ -58,7 +41,6 @@ fi [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME} [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS} -[ "x" = "x$RABBITMQ_CLUSTER_CONFIG_FILE" ] && RABBITMQ_CLUSTER_CONFIG_FILE=${CLUSTER_CONFIG_FILE} [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE} [ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE} [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE} @@ -67,6 +49,11 @@ [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR} [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME} +[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR} +[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand + +[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR="${RABBITMQ_HOME}/plugins" + ## Log rotation [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS} [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log" @@ -78,23 +65,27 @@ [ -f "${RABBITMQ_LOGS}" ] && cat "${RABBITMQ_LOGS}" >> "${RABBITMQ_LOGS}${RABBITMQ_BACKUP_EXTENSION}" [ -f "${RABBITMQ_SASL_LOGS}" ] && cat "${RABBITMQ_SASL_LOGS}" >> "${RABBITMQ_SASL_LOGS}${RABBITMQ_BACKUP_EXTENSION}" -if [ -f "$RABBITMQ_CLUSTER_CONFIG_FILE" ]; then - RABBITMQ_CLUSTER_CONFIG_OPTION="-rabbit cluster_config \"$RABBITMQ_CLUSTER_CONFIG_FILE\"" -else - RABBITMQ_CLUSTER_CONFIG_OPTION="" -fi - RABBITMQ_START_RABBIT= -[ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT='-noinput' +[ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT='-noinput' RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin" -if [ -f "${RABBITMQ_EBIN_ROOT}/rabbit.boot" ] && [ "x" = "x$RABBITMQ_NODE_ONLY" ]; then - RABBITMQ_BOOT_FILE="${RABBITMQ_EBIN_ROOT}/rabbit" - RABBITMQ_EBIN_PATH="" +if [ "x" = "x$RABBITMQ_NODE_ONLY" ]; then + if erl \ + -pa "$RABBITMQ_EBIN_ROOT" \ + -noinput \ + -hidden \ + -s rabbit_prelaunch \ + -sname rabbitmqprelaunch$$ \ + -extra "$RABBITMQ_PLUGINS_DIR" "${RABBITMQ_PLUGINS_EXPAND_DIR}" "${RABBITMQ_NODENAME}" + then + RABBITMQ_BOOT_FILE="${RABBITMQ_PLUGINS_EXPAND_DIR}/rabbit" + RABBITMQ_EBIN_PATH="" + else + exit 1 + fi else RABBITMQ_BOOT_FILE=start_sasl RABBITMQ_EBIN_PATH="-pa ${RABBITMQ_EBIN_ROOT}" - [ "x" = "x$RABBITMQ_NODE_ONLY" ] && RABBITMQ_START_RABBIT="${RABBITMQ_START_RABBIT} -s rabbit" fi RABBITMQ_CONFIG_ARG= [ -f "${RABBITMQ_CONFIG_FILE}.config" ] && RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" @@ -123,6 +114,5 @@ -os_mon start_disksup false \ -os_mon start_memsup false \ -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \ - ${RABBITMQ_CLUSTER_CONFIG_OPTION} \ ${RABBITMQ_SERVER_START_ARGS} \ 
"$@" diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-server.bat rabbitmq-server-2.3.1/scripts/rabbitmq-server.bat --- rabbitmq-server-1.7.2/scripts/rabbitmq-server.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-server.bat 2011-02-03 12:47:36.000000000 +0000 @@ -1,33 +1,18 @@ @echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ +REM The contents of this file are subject to the Mozilla Public License +REM Version 1.1 (the "License"); you may not use this file except in +REM compliance with the License. You may obtain a copy of the License +REM at http://www.mozilla.org/MPL/ +REM +REM Software distributed under the License is distributed on an "AS IS" +REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +REM the License for the specific language governing rights and +REM limitations under the License. REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. +REM The Original Code is RabbitMQ. REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. +REM The Initial Developer of the Original Code is VMware, Inc. +REM Copyright (c) 2007-2011 VMware, Inc. All rights reserved. REM setlocal @@ -42,13 +27,17 @@ set RABBITMQ_BASE=!APPDATA!\RabbitMQ ) +if "!COMPUTERNAME!"=="" ( + set COMPUTERNAME=localhost +) + if "!RABBITMQ_NODENAME!"=="" ( - set RABBITMQ_NODENAME=rabbit + set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME! ) if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( if not "!RABBITMQ_NODE_PORT!"=="" ( - set RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 + set RABBITMQ_NODE_IP_ADDRESS=auto ) ) else ( if "!RABBITMQ_NODE_PORT!"=="" ( @@ -99,26 +88,33 @@ rem End of log management -if "!RABBITMQ_CLUSTER_CONFIG_FILE!"=="" ( - set RABBITMQ_CLUSTER_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq_cluster.config -) -set CLUSTER_CONFIG= -if not exist "!RABBITMQ_CLUSTER_CONFIG_FILE!" 
GOTO L1 -set CLUSTER_CONFIG=-rabbit cluster_config \""!RABBITMQ_CLUSTER_CONFIG_FILE:\=/!"\" -:L1 - if "!RABBITMQ_MNESIA_DIR!"=="" ( set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia ) + +if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" ( + set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand +) + +set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin -if exist "!RABBITMQ_EBIN_ROOT!\rabbit.boot" ( - echo Using Custom Boot File "!RABBITMQ_EBIN_ROOT!\rabbit.boot" - set RABBITMQ_BOOT_FILE=!RABBITMQ_EBIN_ROOT!\rabbit - set RABBITMQ_EBIN_PATH= -) else ( - set RABBITMQ_BOOT_FILE=start_sasl - set RABBITMQ_EBIN_PATH=-pa "!RABBITMQ_EBIN_ROOT!" + +"!ERLANG_HOME!\bin\erl.exe" ^ +-pa "!RABBITMQ_EBIN_ROOT!" ^ +-noinput -hidden ^ +-s rabbit_prelaunch ^ +-sname rabbitmqprelaunch!RANDOM! ^ +-extra "!RABBITMQ_PLUGINS_DIR:\=/!" ^ + "!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!" ^ + "!RABBITMQ_NODENAME!" + +set RABBITMQ_BOOT_FILE=!RABBITMQ_PLUGINS_EXPAND_DIR!\rabbit +if ERRORLEVEL 1 ( + exit /B 1 ) + +set RABBITMQ_EBIN_PATH= + if "!RABBITMQ_CONFIG_FILE!"=="" ( set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq ) @@ -145,6 +141,7 @@ -s rabbit ^ +W w ^ +A30 ^ ++P 1048576 ^ -kernel inet_default_listen_options "[{nodelay, true}]" ^ -kernel inet_default_connect_options "[{nodelay, true}]" ^ !RABBITMQ_LISTEN_ARG! ^ @@ -156,7 +153,6 @@ -os_mon start_disksup false ^ -os_mon start_memsup false ^ -mnesia dir \""!RABBITMQ_MNESIA_DIR!"\" ^ -!CLUSTER_CONFIG! ^ !RABBITMQ_SERVER_START_ARGS! ^ !STAR! diff -Nru rabbitmq-server-1.7.2/scripts/rabbitmq-service.bat rabbitmq-server-2.3.1/scripts/rabbitmq-service.bat --- rabbitmq-server-1.7.2/scripts/rabbitmq-service.bat 2010-02-15 16:01:13.000000000 +0000 +++ rabbitmq-server-2.3.1/scripts/rabbitmq-service.bat 2011-02-03 12:47:36.000000000 +0000 @@ -1,33 +1,18 @@ @echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ +REM The contents of this file are subject to the Mozilla Public License +REM Version 1.1 (the "License"); you may not use this file except in +REM compliance with the License. You may obtain a copy of the License +REM at http://www.mozilla.org/MPL/ +REM +REM Software distributed under the License is distributed on an "AS IS" +REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +REM the License for the specific language governing rights and +REM limitations under the License. REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. +REM The Original Code is RabbitMQ. REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. 
Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. +REM The Initial Developer of the Original Code is VMware, Inc. +REM Copyright (c) 2007-2011 VMware, Inc. All rights reserved. REM setlocal @@ -48,13 +33,17 @@ set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME! ) +if "!COMPUTERNAME!"=="" ( + set COMPUTERNAME=localhost +) + if "!RABBITMQ_NODENAME!"=="" ( - set RABBITMQ_NODENAME=rabbit + set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME! ) if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( if not "!RABBITMQ_NODE_PORT!"=="" ( - set RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 + set RABBITMQ_NODE_IP_ADDRESS=auto ) ) else ( if "!RABBITMQ_NODE_PORT!"=="" ( @@ -132,18 +121,13 @@ rem End of log management -if "!RABBITMQ_CLUSTER_CONFIG_FILE!"=="" ( - set RABBITMQ_CLUSTER_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq_cluster.config -) -set CLUSTER_CONFIG= -if not exist "!RABBITMQ_CLUSTER_CONFIG_FILE!" GOTO L1 -set CLUSTER_CONFIG=-rabbit cluster_config \""!RABBITMQ_CLUSTER_CONFIG_FILE:\=/!"\" -:L1 - if "!RABBITMQ_MNESIA_DIR!"=="" ( set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia ) +if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" ( + set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand +) if "!P1!" == "install" goto INSTALL_SERVICE for %%i in (start stop disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE @@ -181,15 +165,24 @@ echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters ) +set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin -if exist "!RABBITMQ_EBIN_ROOT!\rabbit.boot" ( - echo Using Custom Boot File "!RABBITMQ_EBIN_ROOT!\rabbit.boot" - set RABBITMQ_BOOT_FILE=!RABBITMQ_EBIN_ROOT!\rabbit - set RABBITMQ_EBIN_PATH= -) else ( - set RABBITMQ_BOOT_FILE=start_sasl - set RABBITMQ_EBIN_PATH=-pa "!RABBITMQ_EBIN_ROOT!" + +"!ERLANG_HOME!\bin\erl.exe" ^ +-pa "!RABBITMQ_EBIN_ROOT!" ^ +-noinput -hidden ^ +-s rabbit_prelaunch ^ +-extra "!RABBITMQ_PLUGINS_DIR:\=/!" ^ + "!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!" ^ + "" + +set RABBITMQ_BOOT_FILE=!RABBITMQ_PLUGINS_EXPAND_DIR!\rabbit +if ERRORLEVEL 1 ( + exit /B 1 ) + +set RABBITMQ_EBIN_PATH= + if "!RABBITMQ_CONFIG_FILE!"=="" ( set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq ) @@ -225,7 +218,6 @@ -os_mon start_disksup false ^ -os_mon start_memsup false ^ -mnesia dir \""!RABBITMQ_MNESIA_DIR!"\" ^ -!CLUSTER_CONFIG! ^ !RABBITMQ_SERVER_START_ARGS! ^ !STAR! @@ -240,6 +232,7 @@ -sname !RABBITMQ_NODENAME! ^ !CONSOLE_FLAG! ^ -args "!ERLANG_SERVICE_ARGUMENTS!" > NUL + goto END diff -Nru rabbitmq-server-1.7.2/src/bpqueue.erl rabbitmq-server-2.3.1/src/bpqueue.erl --- rabbitmq-server-1.7.2/src/bpqueue.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/bpqueue.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,271 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(bpqueue). + +%% Block-prefixed queue. From the perspective of the queue interface +%% the datastructure acts like a regular queue where each value is +%% paired with the prefix. +%% +%% This is implemented as a queue of queues, which is more space and +%% time efficient, whilst supporting the normal queue interface. Each +%% inner queue has a prefix, which does not need to be unique, and it +%% is guaranteed that no two consecutive blocks have the same +%% prefix. len/1 returns the flattened length of the queue and is +%% O(1). + +-export([new/0, is_empty/1, len/1, in/3, in_r/3, out/1, out_r/1, join/2, + foldl/3, foldr/3, from_list/1, to_list/1, map_fold_filter_l/4, + map_fold_filter_r/4]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([bpqueue/0]). + +-type(bpqueue() :: {non_neg_integer(), queue()}). +-type(prefix() :: any()). +-type(value() :: any()). +-type(result() :: ({'empty', bpqueue()} | + {{'value', prefix(), value()}, bpqueue()})). + +-spec(new/0 :: () -> bpqueue()). +-spec(is_empty/1 :: (bpqueue()) -> boolean()). +-spec(len/1 :: (bpqueue()) -> non_neg_integer()). +-spec(in/3 :: (prefix(), value(), bpqueue()) -> bpqueue()). +-spec(in_r/3 :: (prefix(), value(), bpqueue()) -> bpqueue()). +-spec(out/1 :: (bpqueue()) -> result()). +-spec(out_r/1 :: (bpqueue()) -> result()). +-spec(join/2 :: (bpqueue(), bpqueue()) -> bpqueue()). +-spec(foldl/3 :: (fun ((prefix(), value(), B) -> B), B, bpqueue()) -> B). +-spec(foldr/3 :: (fun ((prefix(), value(), B) -> B), B, bpqueue()) -> B). +-spec(from_list/1 :: ([{prefix(), [value()]}]) -> bpqueue()). +-spec(to_list/1 :: (bpqueue()) -> [{prefix(), [value()]}]). +-spec(map_fold_filter_l/4 :: ((fun ((prefix()) -> boolean())), + (fun ((value(), B) -> + ({prefix(), value(), B} | 'stop'))), + B, + bpqueue()) -> + {bpqueue(), B}). +-spec(map_fold_filter_r/4 :: ((fun ((prefix()) -> boolean())), + (fun ((value(), B) -> + ({prefix(), value(), B} | 'stop'))), + B, + bpqueue()) -> + {bpqueue(), B}). + +-endif. + +%%---------------------------------------------------------------------------- + +new() -> {0, queue:new()}. + +is_empty({0, _Q}) -> true; +is_empty(_BPQ) -> false. + +len({N, _Q}) -> N. + +in(Prefix, Value, {0, Q}) -> + {1, queue:in({Prefix, queue:from_list([Value])}, Q)}; +in(Prefix, Value, BPQ) -> + in1({fun queue:in/2, fun queue:out_r/1}, Prefix, Value, BPQ). + +in_r(Prefix, Value, BPQ = {0, _Q}) -> + in(Prefix, Value, BPQ); +in_r(Prefix, Value, BPQ) -> + in1({fun queue:in_r/2, fun queue:out/1}, Prefix, Value, BPQ). + +in1({In, Out}, Prefix, Value, {N, Q}) -> + {N+1, case Out(Q) of + {{value, {Prefix, InnerQ}}, Q1} -> + In({Prefix, In(Value, InnerQ)}, Q1); + {{value, {_Prefix, _InnerQ}}, _Q1} -> + In({Prefix, queue:in(Value, queue:new())}, Q) + end}. + +in_q(Prefix, Queue, BPQ = {0, Q}) -> + case queue:len(Queue) of + 0 -> BPQ; + N -> {N, queue:in({Prefix, Queue}, Q)} + end; +in_q(Prefix, Queue, BPQ) -> + in_q1({fun queue:in/2, fun queue:out_r/1, + fun queue:join/2}, + Prefix, Queue, BPQ). + +in_q_r(Prefix, Queue, BPQ = {0, _Q}) -> + in_q(Prefix, Queue, BPQ); +in_q_r(Prefix, Queue, BPQ) -> + in_q1({fun queue:in_r/2, fun queue:out/1, + fun (T, H) -> queue:join(H, T) end}, + Prefix, Queue, BPQ). 
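For readers unfamiliar with the structure described in the module comment above, a minimal usage sketch of the queue interface (the prefixes and values here are arbitrary illustrations, not taken from RabbitMQ itself):

    Q0 = bpqueue:new(),
    Q1 = bpqueue:in(a, 1, Q0),
    Q2 = bpqueue:in(a, 2, Q1),
    Q3 = bpqueue:in(b, 3, Q2),
    3 = bpqueue:len(Q3),                            %% flattened length, O(1)
    [{a, [1, 2]}, {b, [3]}] = bpqueue:to_list(Q3),  %% equal consecutive prefixes share a block
    {{value, a, 1}, _Q4} = bpqueue:out(Q3).

Note how the two values enqueued under prefix 'a' end up in a single block, while the change of prefix to 'b' starts a new one.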
+ +in_q1({In, Out, Join}, Prefix, Queue, BPQ = {N, Q}) -> + case queue:len(Queue) of + 0 -> BPQ; + M -> {N + M, case Out(Q) of + {{value, {Prefix, InnerQ}}, Q1} -> + In({Prefix, Join(InnerQ, Queue)}, Q1); + {{value, {_Prefix, _InnerQ}}, _Q1} -> + In({Prefix, Queue}, Q) + end} + end. + +out({0, _Q} = BPQ) -> {empty, BPQ}; +out(BPQ) -> out1({fun queue:in_r/2, fun queue:out/1}, BPQ). + +out_r({0, _Q} = BPQ) -> {empty, BPQ}; +out_r(BPQ) -> out1({fun queue:in/2, fun queue:out_r/1}, BPQ). + +out1({In, Out}, {N, Q}) -> + {{value, {Prefix, InnerQ}}, Q1} = Out(Q), + {{value, Value}, InnerQ1} = Out(InnerQ), + Q2 = case queue:is_empty(InnerQ1) of + true -> Q1; + false -> In({Prefix, InnerQ1}, Q1) + end, + {{value, Prefix, Value}, {N-1, Q2}}. + +join({0, _Q}, BPQ) -> + BPQ; +join(BPQ, {0, _Q}) -> + BPQ; +join({NHead, QHead}, {NTail, QTail}) -> + {{value, {Prefix, InnerQHead}}, QHead1} = queue:out_r(QHead), + {NHead + NTail, + case queue:out(QTail) of + {{value, {Prefix, InnerQTail}}, QTail1} -> + queue:join( + queue:in({Prefix, queue:join(InnerQHead, InnerQTail)}, QHead1), + QTail1); + {{value, {_Prefix, _InnerQTail}}, _QTail1} -> + queue:join(QHead, QTail) + end}. + +foldl(_Fun, Init, {0, _Q}) -> Init; +foldl( Fun, Init, {_N, Q}) -> fold1(fun queue:out/1, Fun, Init, Q). + +foldr(_Fun, Init, {0, _Q}) -> Init; +foldr( Fun, Init, {_N, Q}) -> fold1(fun queue:out_r/1, Fun, Init, Q). + +fold1(Out, Fun, Init, Q) -> + case Out(Q) of + {empty, _Q} -> + Init; + {{value, {Prefix, InnerQ}}, Q1} -> + fold1(Out, Fun, fold1(Out, Fun, Prefix, Init, InnerQ), Q1) + end. + +fold1(Out, Fun, Prefix, Init, InnerQ) -> + case Out(InnerQ) of + {empty, _Q} -> + Init; + {{value, Value}, InnerQ1} -> + fold1(Out, Fun, Prefix, Fun(Prefix, Value, Init), InnerQ1) + end. + +from_list(List) -> + {FinalPrefix, FinalInnerQ, ListOfPQs1, Len} = + lists:foldl( + fun ({_Prefix, []}, Acc) -> + Acc; + ({Prefix, InnerList}, {Prefix, InnerQ, ListOfPQs, LenAcc}) -> + {Prefix, queue:join(InnerQ, queue:from_list(InnerList)), + ListOfPQs, LenAcc + length(InnerList)}; + ({Prefix1, InnerList}, {Prefix, InnerQ, ListOfPQs, LenAcc}) -> + {Prefix1, queue:from_list(InnerList), + [{Prefix, InnerQ} | ListOfPQs], LenAcc + length(InnerList)} + end, {undefined, queue:new(), [], 0}, List), + ListOfPQs2 = [{FinalPrefix, FinalInnerQ} | ListOfPQs1], + [{undefined, InnerQ1} | Rest] = All = lists:reverse(ListOfPQs2), + {Len, queue:from_list(case queue:is_empty(InnerQ1) of + true -> Rest; + false -> All + end)}. + +to_list({0, _Q}) -> []; +to_list({_N, Q}) -> [{Prefix, queue:to_list(InnerQ)} || + {Prefix, InnerQ} <- queue:to_list(Q)]. + +%% map_fold_filter_[lr](FilterFun, Fun, Init, BPQ) -> {BPQ, Init} +%% where FilterFun(Prefix) -> boolean() +%% Fun(Value, Init) -> {Prefix, Value, Init} | stop +%% +%% The filter fun allows you to skip very quickly over blocks that +%% you're not interested in. Such blocks appear in the resulting bpq +%% without modification. The Fun is then used both to map the value, +%% which also allows you to change the prefix (and thus block) of the +%% value, and also to modify the Init/Acc (just like a fold). If the +%% Fun returns 'stop' then it is not applied to any further items. +map_fold_filter_l(_PFilter, _Fun, Init, BPQ = {0, _Q}) -> + {BPQ, Init}; +map_fold_filter_l(PFilter, Fun, Init, {N, Q}) -> + map_fold_filter1({fun queue:out/1, fun queue:in/2, + fun in_q/3, fun join/2}, + N, PFilter, Fun, Init, Q, new()). 
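A worked sketch of the map/fold/filter contract just described, again with made-up prefixes and values: blocks whose prefix fails the filter are carried across untouched, while values in matching blocks are mapped and folded over.

    BPQ = bpqueue:from_list([{a, [1, 2]}, {b, [3]}]),
    {BPQ1, Visited} =
        bpqueue:map_fold_filter_l(
          fun (Prefix)     -> Prefix =:= a end,             %% only enter 'a' blocks
          fun (Value, Acc) -> {a, Value * 10, Acc + 1} end, %% map the value, count it
          0, BPQ),
    [{a, [10, 20]}, {b, [3]}] = bpqueue:to_list(BPQ1),
    2 = Visited.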
+ +map_fold_filter_r(_PFilter, _Fun, Init, BPQ = {0, _Q}) -> + {BPQ, Init}; +map_fold_filter_r(PFilter, Fun, Init, {N, Q}) -> + map_fold_filter1({fun queue:out_r/1, fun queue:in_r/2, + fun in_q_r/3, fun (T, H) -> join(H, T) end}, + N, PFilter, Fun, Init, Q, new()). + +map_fold_filter1(Funs = {Out, _In, InQ, Join}, Len, PFilter, Fun, + Init, Q, QNew) -> + case Out(Q) of + {empty, _Q} -> + {QNew, Init}; + {{value, {Prefix, InnerQ}}, Q1} -> + case PFilter(Prefix) of + true -> + {Init1, QNew1, Cont} = + map_fold_filter2(Funs, Fun, Prefix, Prefix, + Init, InnerQ, QNew, queue:new()), + case Cont of + false -> {Join(QNew1, {Len - len(QNew1), Q1}), Init1}; + true -> map_fold_filter1(Funs, Len, PFilter, Fun, + Init1, Q1, QNew1) + end; + false -> + map_fold_filter1(Funs, Len, PFilter, Fun, + Init, Q1, InQ(Prefix, InnerQ, QNew)) + end + end. + +map_fold_filter2(Funs = {Out, In, InQ, _Join}, Fun, OrigPrefix, Prefix, + Init, InnerQ, QNew, InnerQNew) -> + case Out(InnerQ) of + {empty, _Q} -> + {Init, InQ(OrigPrefix, InnerQ, + InQ(Prefix, InnerQNew, QNew)), true}; + {{value, Value}, InnerQ1} -> + case Fun(Value, Init) of + stop -> + {Init, InQ(OrigPrefix, InnerQ, + InQ(Prefix, InnerQNew, QNew)), false}; + {Prefix1, Value1, Init1} -> + {Prefix2, QNew1, InnerQNew1} = + case Prefix1 =:= Prefix of + true -> {Prefix, QNew, In(Value1, InnerQNew)}; + false -> {Prefix1, InQ(Prefix, InnerQNew, QNew), + In(Value1, queue:new())} + end, + map_fold_filter2(Funs, Fun, OrigPrefix, Prefix2, + Init1, InnerQ1, QNew1, InnerQNew1) + end + end. diff -Nru rabbitmq-server-1.7.2/src/delegate.erl rabbitmq-server-2.3.1/src/delegate.erl --- rabbitmq-server-1.7.2/src/delegate.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/delegate.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,164 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(delegate). + +-behaviour(gen_server2). + +-export([start_link/1, invoke_no_result/2, invoke/2, delegate_count/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/1 :: + (non_neg_integer()) -> {'ok', pid()} | {'error', any()}). +-spec(invoke_no_result/2 :: + (pid() | [pid()], fun ((pid()) -> any())) -> 'ok'). +-spec(invoke/2 :: + ( pid(), fun ((pid()) -> A)) -> A; + ([pid()], fun ((pid()) -> A)) -> {[{pid(), A}], + [{pid(), term()}]}). + +-spec(delegate_count/1 :: ([node()]) -> non_neg_integer()). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). + +%%---------------------------------------------------------------------------- + +start_link(Num) -> + gen_server2:start_link({local, delegate_name(Num)}, ?MODULE, [], []). 
+ +invoke(Pid, Fun) when is_pid(Pid) andalso node(Pid) =:= node() -> + Fun(Pid); +invoke(Pid, Fun) when is_pid(Pid) -> + case invoke([Pid], Fun) of + {[{Pid, Result}], []} -> + Result; + {[], [{Pid, {Class, Reason, StackTrace}}]} -> + erlang:raise(Class, Reason, StackTrace) + end; + +invoke(Pids, Fun) when is_list(Pids) -> + {LocalPids, Grouped} = group_pids_by_node(Pids), + %% The use of multi_call is only safe because the timeout is + %% infinity, and thus there is no process spawned in order to do + %% the sending. Thus calls can't overtake preceding calls/casts. + {Replies, BadNodes} = + case orddict:fetch_keys(Grouped) of + [] -> {[], []}; + RemoteNodes -> gen_server2:multi_call( + RemoteNodes, delegate(RemoteNodes), + {invoke, Fun, Grouped}, infinity) + end, + BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} || + BadNode <- BadNodes, + Pid <- orddict:fetch(BadNode, Grouped)], + ResultsNoNode = lists:append([safe_invoke(LocalPids, Fun) | + [Results || {_Node, Results} <- Replies]]), + lists:foldl( + fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad}; + ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]} + end, {[], BadPids}, ResultsNoNode). + +invoke_no_result(Pid, Fun) when is_pid(Pid) andalso node(Pid) =:= node() -> + safe_invoke(Pid, Fun), %% we don't care about any error + ok; +invoke_no_result(Pid, Fun) when is_pid(Pid) -> + invoke_no_result([Pid], Fun); + +invoke_no_result(Pids, Fun) when is_list(Pids) -> + {LocalPids, Grouped} = group_pids_by_node(Pids), + case orddict:fetch_keys(Grouped) of + [] -> ok; + RemoteNodes -> gen_server2:abcast(RemoteNodes, delegate(RemoteNodes), + {invoke, Fun, Grouped}) + end, + safe_invoke(LocalPids, Fun), %% must not die + ok. + +%%---------------------------------------------------------------------------- + +group_pids_by_node(Pids) -> + LocalNode = node(), + lists:foldl( + fun (Pid, {Local, Remote}) when node(Pid) =:= LocalNode -> + {[Pid | Local], Remote}; + (Pid, {Local, Remote}) -> + {Local, + orddict:update( + node(Pid), fun (List) -> [Pid | List] end, [Pid], Remote)} + end, {[], orddict:new()}, Pids). + +delegate_count([RemoteNode | _]) -> + {ok, Count} = case application:get_env(rabbit, delegate_count) of + undefined -> rpc:call(RemoteNode, application, get_env, + [rabbit, delegate_count]); + Result -> Result + end, + Count. + +delegate_name(Hash) -> + list_to_atom("delegate_" ++ integer_to_list(Hash)). + +delegate(RemoteNodes) -> + case get(delegate) of + undefined -> Name = + delegate_name(erlang:phash2( + self(), delegate_count(RemoteNodes))), + put(delegate, Name), + Name; + Name -> Name + end. + +safe_invoke(Pids, Fun) when is_list(Pids) -> + [safe_invoke(Pid, Fun) || Pid <- Pids]; +safe_invoke(Pid, Fun) when is_pid(Pid) -> + try + {ok, Pid, Fun(Pid)} + catch Class:Reason -> + {error, Pid, {Class, Reason, erlang:get_stacktrace()}} + end. + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, node(), hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +handle_call({invoke, Fun, Grouped}, _From, Node) -> + {reply, safe_invoke(orddict:fetch(Node, Grouped), Fun), Node, hibernate}. + +handle_cast({invoke, Fun, Grouped}, Node) -> + safe_invoke(orddict:fetch(Node, Grouped), Fun), + {noreply, Node, hibernate}. + +handle_info(_Info, Node) -> + {noreply, Node, hibernate}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, Node, _Extra) -> + {ok, Node}. 
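As a rough illustration of how the two entry points are intended to be used (Pids and Event are placeholders for whatever the caller has to hand), a call against a mixed set of local and remote pids might look like:

    %% synchronous fan-out: successes and failures are reported separately
    {Replies, Errors} =
        delegate:invoke(Pids, fun (Pid) -> gen_server2:call(Pid, info, infinity) end),

    %% asynchronous fan-out, e.g. for notifications where no reply is wanted
    ok = delegate:invoke_no_result(
           Pids, fun (Pid) -> gen_server2:cast(Pid, {event, Event}) end).

Local pids are invoked directly in the caller; remote pids are grouped per node and routed through delegate processes on those nodes, as implemented above.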
diff -Nru rabbitmq-server-1.7.2/src/delegate_sup.erl rabbitmq-server-2.3.1/src/delegate_sup.erl --- rabbitmq-server-1.7.2/src/delegate_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/delegate_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,47 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(delegate_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +-define(SERVER, ?MODULE). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%%---------------------------------------------------------------------------- + +init(_Args) -> + DCount = delegate:delegate_count([node()]), + {ok, {{one_for_one, 10, 10}, + [{Num, {delegate, start_link, [Num]}, + transient, 16#ffffffff, worker, [delegate]} || + Num <- lists:seq(0, DCount - 1)]}}. diff -Nru rabbitmq-server-1.7.2/src/file_handle_cache.erl rabbitmq-server-2.3.1/src/file_handle_cache.erl --- rabbitmq-server-1.7.2/src/file_handle_cache.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/file_handle_cache.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,1176 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(file_handle_cache). + +%% A File Handle Cache +%% +%% This extends a subset of the functionality of the Erlang file +%% module. In the below, we use "file handle" to specifically refer to +%% file handles, and "file descriptor" to refer to descriptors which +%% are not file handles, e.g. sockets. +%% +%% Some constraints +%% 1) This supports one writer, multiple readers per file. Nothing +%% else. +%% 2) Do not open the same file from different processes. Bad things +%% may happen, especially for writes. +%% 3) Writes are all appends. You cannot write to the middle of a +%% file, although you can truncate and then append if you want. +%% 4) Although there is a write buffer, there is no read buffer. Feel +%% free to use the read_ahead mode, but beware of the interaction +%% between that buffer and the write buffer. 
+%% +%% Some benefits +%% 1) You do not have to remember to call sync before close +%% 2) Buffering is much more flexible than with the plain file module, +%% and you can control when the buffer gets flushed out. This means +%% that you can rely on reads-after-writes working, without having to +%% call the expensive sync. +%% 3) Unnecessary calls to position and sync get optimised out. +%% 4) You can find out what your 'real' offset is, and what your +%% 'virtual' offset is (i.e. where the hdl really is, and where it +%% would be after the write buffer is written out). +%% 5) You can find out what the offset was when you last sync'd. +%% +%% There is also a server component which serves to limit the number +%% of open file descriptors. This is a hard limit: the server +%% component will ensure that clients do not have more file +%% descriptors open than it's configured to allow. +%% +%% On open, the client requests permission from the server to open the +%% required number of file handles. The server may ask the client to +%% close other file handles that it has open, or it may queue the +%% request and ask other clients to close file handles they have open +%% in order to satisfy the request. Requests are always satisfied in +%% the order they arrive, even if a latter request (for a small number +%% of file handles) can be satisfied before an earlier request (for a +%% larger number of file handles). On close, the client sends a +%% message to the server. These messages allow the server to keep +%% track of the number of open handles. The client also keeps a +%% gb_tree which is updated on every use of a file handle, mapping the +%% time at which the file handle was last used (timestamp) to the +%% handle. Thus the smallest key in this tree maps to the file handle +%% that has not been used for the longest amount of time. This +%% smallest key is included in the messages to the server. As such, +%% the server keeps track of when the least recently used file handle +%% was used *at the point of the most recent open or close* by each +%% client. +%% +%% Note that this data can go very out of date, by the client using +%% the least recently used handle. +%% +%% When the limit is exceeded (i.e. the number of open file handles is +%% at the limit and there are pending 'open' requests), the server +%% calculates the average age of the last reported least recently used +%% file handle of all the clients. It then tells all the clients to +%% close any handles not used for longer than this average, by +%% invoking the callback the client registered. The client should +%% receive this message and pass it into +%% set_maximum_since_use/1. However, it is highly possible this age +%% will be greater than the ages of all the handles the client knows +%% of because the client has used its file handles in the mean +%% time. Thus at this point the client reports to the server the +%% current timestamp at which its least recently used file handle was +%% last used. The server will check two seconds later that either it +%% is back under the limit, in which case all is well again, or if +%% not, it will calculate a new average age. Its data will be much +%% more recent now, and so it is very likely that when this is +%% communicated to the clients, the clients will close file handles. 
+%% (In extreme cases, where it's very likely that all clients have +%% used their open handles since they last sent in an update, which +%% would mean that the average will never cause any file handles to +%% be closed, the server can send out an average age of 0, resulting +%% in all available clients closing all their file handles.) +%% +%% Care is taken to ensure that (a) processes which are blocked +%% waiting for file descriptors to become available are not sent +%% requests to close file handles; and (b) given it is known how many +%% file handles a process has open, when the average age is forced to +%% 0, close messages are only sent to enough processes to release the +%% correct number of file handles and the list of processes is +%% randomly shuffled. This ensures we don't cause processes to +%% needlessly close file handles, and ensures that we don't always +%% make such requests of the same processes. +%% +%% The advantage of this scheme is that there is only communication +%% from the client to the server on open, close, and when in the +%% process of trying to reduce file handle usage. There is no +%% communication from the client to the server on normal file handle +%% operations. This scheme forms a feed-back loop - the server does +%% not care which file handles are closed, just that some are, and it +%% checks this repeatedly when over the limit. +%% +%% Handles which are closed as a result of the server are put into a +%% "soft-closed" state in which the handle is closed (data flushed out +%% and sync'd first) but the state is maintained. The handle will be +%% fully reopened again as soon as needed, thus users of this library +%% do not need to worry about their handles being closed by the server +%% - reopening them when necessary is handled transparently. +%% +%% The server also supports obtain and transfer. obtain/0 blocks until +%% a file descriptor is available, at which point the requesting +%% process is considered to 'own' one more descriptor. transfer/1 +%% transfers ownership of a file descriptor between processes. It is +%% non-blocking. Obtain is used to obtain permission to accept file +%% descriptors. Obtain has a lower limit, set by the ?OBTAIN_LIMIT/1 +%% macro. File handles can use the entire limit, but will be evicted +%% by obtain calls up to the point at which no more obtain calls can +%% be satisfied by the obtains limit. Thus there will always be some +%% capacity available for file handles. Processes that use obtain are +%% never asked to return them, and they are not managed in any way by +%% the server. It is simply a mechanism to ensure that processes that +%% need file descriptors such as sockets can do so in such a way that +%% the overall number of open file descriptors is managed. +%% +%% The callers of register_callback/3, obtain/0, and the argument of +%% transfer/1 are monitored, reducing the count of handles in use +%% appropriately when the processes terminate. + +-behaviour(gen_server). + +-export([register_callback/3]). +-export([open/3, close/1, read/2, append/2, sync/1, position/2, truncate/1, + last_sync_offset/1, current_virtual_offset/1, current_raw_offset/1, + flush/1, copy/3, set_maximum_since_use/1, delete/1, clear/1]). +-export([obtain/0, transfer/1, set_limit/1, get_limit/0]). +-export([ulimit/0]). + +-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-define(SERVER, ?MODULE). +-define(RESERVED_FOR_OTHERS, 100). 
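In terms of the API exported above, a typical write path looks very much like the plain file module; a minimal sketch with an assumed path and buffer size (the write_buffer option is the one described in the open/3 spec further down):

    {ok, Ref} = file_handle_cache:open("/var/tmp/fhc_example", [write, raw, binary],
                                       [{write_buffer, 65536}]),
    ok = file_handle_cache:append(Ref, <<"log entry\n">>),   %% buffered; not necessarily on disk yet
    ok = file_handle_cache:sync(Ref),                        %% flushes the buffer, then syncs
    ok = file_handle_cache:close(Ref).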
+ +%% Googling around suggests that Windows has a limit somewhere around +%% 16M, eg +%% http://blogs.technet.com/markrussinovich/archive/2009/09/29/3283844.aspx +%% however, it turns out that's only available through the win32 +%% API. Via the C Runtime, we have just 512: +%% http://msdn.microsoft.com/en-us/library/6e3b887c%28VS.80%29.aspx +-define(FILE_HANDLES_LIMIT_WINDOWS, 512). +-define(FILE_HANDLES_LIMIT_OTHER, 1024). +-define(FILE_HANDLES_CHECK_INTERVAL, 2000). + +-define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)). +-define(CLIENT_ETS_TABLE, ?MODULE). + +%%---------------------------------------------------------------------------- + +-record(file, + { reader_count, + has_writer + }). + +-record(handle, + { hdl, + offset, + trusted_offset, + is_dirty, + write_buffer_size, + write_buffer_size_limit, + write_buffer, + at_eof, + path, + mode, + options, + is_write, + is_read, + last_used_at + }). + +-record(fhc_state, + { elders, + limit, + open_count, + open_pending, + obtain_limit, + obtain_count, + obtain_pending, + clients, + timer_ref + }). + +-record(cstate, + { pid, + callback, + opened, + obtained, + blocked, + pending_closes + }). + +-record(pending, + { kind, + pid, + requested, + from + }). + +%%---------------------------------------------------------------------------- +%% Specs +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(ref() :: any()). +-type(ok_or_error() :: 'ok' | {'error', any()}). +-type(val_or_error(T) :: {'ok', T} | {'error', any()}). +-type(position() :: ('bof' | 'eof' | non_neg_integer() | + {('bof' |'eof'), non_neg_integer()} | + {'cur', integer()})). +-type(offset() :: non_neg_integer()). + +-spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok'). +-spec(open/3 :: + (string(), [any()], + [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')}]) + -> val_or_error(ref())). +-spec(close/1 :: (ref()) -> ok_or_error()). +-spec(read/2 :: (ref(), non_neg_integer()) -> + val_or_error([char()] | binary()) | 'eof'). +-spec(append/2 :: (ref(), iodata()) -> ok_or_error()). +-spec(sync/1 :: (ref()) -> ok_or_error()). +-spec(position/2 :: (ref(), position()) -> val_or_error(offset())). +-spec(truncate/1 :: (ref()) -> ok_or_error()). +-spec(last_sync_offset/1 :: (ref()) -> val_or_error(offset())). +-spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())). +-spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())). +-spec(flush/1 :: (ref()) -> ok_or_error()). +-spec(copy/3 :: (ref(), ref(), non_neg_integer()) -> + val_or_error(non_neg_integer())). +-spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok'). +-spec(delete/1 :: (ref()) -> ok_or_error()). +-spec(clear/1 :: (ref()) -> ok_or_error()). +-spec(obtain/0 :: () -> 'ok'). +-spec(transfer/1 :: (pid()) -> 'ok'). +-spec(set_limit/1 :: (non_neg_integer()) -> 'ok'). +-spec(get_limit/0 :: () -> non_neg_integer()). +-spec(ulimit/0 :: () -> 'infinity' | 'unknown' | non_neg_integer()). + +-endif. + +%%---------------------------------------------------------------------------- +%% Public API +%%---------------------------------------------------------------------------- + +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], [{timeout, infinity}]). + +register_callback(M, F, A) + when is_atom(M) andalso is_atom(F) andalso is_list(A) -> + gen_server:cast(?SERVER, {register_callback, self(), {M, F, A}}). 
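How a client wires itself into the limiting machinery: it registers an MFA, and when the server wants handles back it invokes that MFA (the server-side invocation is not shown above, so the sketch below assumes the reported age is appended to the registered argument list). Since handle state lives in the owning process's dictionary, the callback should relay the age back to that process, which then calls set_maximum_since_use/1 itself. A hypothetical client module:

    %% registration, done once by the process that owns the handles
    ok = file_handle_cache:register_callback(my_fhc_client, request_close, [self()]),

    %% in my_fhc_client.erl: relay the age to the owning process ...
    request_close(Pid, MaxAge) ->
        gen_server2:cast(Pid, {set_maximum_since_use, MaxAge}).

    %% ... which soft-closes any of its handles unused for at least MaxAge
    handle_cast({set_maximum_since_use, MaxAge}, State) ->
        ok = file_handle_cache:set_maximum_since_use(MaxAge),
        {noreply, State}.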
+ +open(Path, Mode, Options) -> + Path1 = filename:absname(Path), + File1 = #file { reader_count = RCount, has_writer = HasWriter } = + case get({Path1, fhc_file}) of + File = #file {} -> File; + undefined -> #file { reader_count = 0, + has_writer = false } + end, + Mode1 = append_to_write(Mode), + IsWriter = is_writer(Mode1), + case IsWriter andalso HasWriter of + true -> {error, writer_exists}; + false -> {ok, Ref} = new_closed_handle(Path1, Mode1, Options), + case get_or_reopen([{Ref, new}]) of + {ok, [_Handle1]} -> + RCount1 = case is_reader(Mode1) of + true -> RCount + 1; + false -> RCount + end, + HasWriter1 = HasWriter orelse IsWriter, + put({Path1, fhc_file}, + File1 #file { reader_count = RCount1, + has_writer = HasWriter1 }), + {ok, Ref}; + Error -> + erase({Ref, fhc_handle}), + Error + end + end. + +close(Ref) -> + case erase({Ref, fhc_handle}) of + undefined -> ok; + Handle -> case hard_close(Handle) of + ok -> ok; + {Error, Handle1} -> put_handle(Ref, Handle1), + Error + end + end. + +read(Ref, Count) -> + with_flushed_handles( + [Ref], + fun ([#handle { is_read = false }]) -> + {error, not_open_for_reading}; + ([Handle = #handle { hdl = Hdl, offset = Offset }]) -> + case file:read(Hdl, Count) of + {ok, Data} = Obj -> Offset1 = Offset + iolist_size(Data), + {Obj, + [Handle #handle { offset = Offset1 }]}; + eof -> {eof, [Handle #handle { at_eof = true }]}; + Error -> {Error, [Handle]} + end + end). + +append(Ref, Data) -> + with_handles( + [Ref], + fun ([#handle { is_write = false }]) -> + {error, not_open_for_writing}; + ([Handle]) -> + case maybe_seek(eof, Handle) of + {{ok, _Offset}, #handle { hdl = Hdl, offset = Offset, + write_buffer_size_limit = 0, + at_eof = true } = Handle1} -> + Offset1 = Offset + iolist_size(Data), + {file:write(Hdl, Data), + [Handle1 #handle { is_dirty = true, offset = Offset1 }]}; + {{ok, _Offset}, #handle { write_buffer = WriteBuffer, + write_buffer_size = Size, + write_buffer_size_limit = Limit, + at_eof = true } = Handle1} -> + WriteBuffer1 = [Data | WriteBuffer], + Size1 = Size + iolist_size(Data), + Handle2 = Handle1 #handle { write_buffer = WriteBuffer1, + write_buffer_size = Size1 }, + case Limit =/= infinity andalso Size1 > Limit of + true -> {Result, Handle3} = write_buffer(Handle2), + {Result, [Handle3]}; + false -> {ok, [Handle2]} + end; + {{error, _} = Error, Handle1} -> + {Error, [Handle1]} + end + end). + +sync(Ref) -> + with_flushed_handles( + [Ref], + fun ([#handle { is_dirty = false, write_buffer = [] }]) -> + ok; + ([Handle = #handle { hdl = Hdl, offset = Offset, + is_dirty = true, write_buffer = [] }]) -> + case file:sync(Hdl) of + ok -> {ok, [Handle #handle { trusted_offset = Offset, + is_dirty = false }]}; + Error -> {Error, [Handle]} + end + end). + +position(Ref, NewOffset) -> + with_flushed_handles( + [Ref], + fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle), + {Result, [Handle1]} + end). + +truncate(Ref) -> + with_flushed_handles( + [Ref], + fun ([Handle1 = #handle { hdl = Hdl, offset = Offset, + trusted_offset = TOffset }]) -> + case file:truncate(Hdl) of + ok -> TOffset1 = lists:min([Offset, TOffset]), + {ok, [Handle1 #handle { trusted_offset = TOffset1, + at_eof = true }]}; + Error -> {Error, [Handle1]} + end + end). + +last_sync_offset(Ref) -> + with_handles([Ref], fun ([#handle { trusted_offset = TOffset }]) -> + {ok, TOffset} + end). 
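One of the benefits listed in the module comment, reads-after-writes without an explicit sync, follows directly from the functions above: read/2 and position/2 go through with_flushed_handles/2, so the write buffer is written out before the operation runs. A small sketch, assuming a fresh (empty) file at an arbitrary path:

    {ok, Ref} = file_handle_cache:open("/tmp/fhc_demo", [read, write, raw, binary],
                                       [{write_buffer, infinity}]),
    ok = file_handle_cache:append(Ref, <<"hello">>),
    {ok, 0} = file_handle_cache:position(Ref, bof),          %% flushes the buffer first
    {ok, <<"hello">>} = file_handle_cache:read(Ref, 5),      %% sees the data, no sync needed
    ok = file_handle_cache:close(Ref).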
+ +current_virtual_offset(Ref) -> + with_handles([Ref], fun ([#handle { at_eof = true, is_write = true, + offset = Offset, + write_buffer_size = Size }]) -> + {ok, Offset + Size}; + ([#handle { offset = Offset }]) -> + {ok, Offset} + end). + +current_raw_offset(Ref) -> + with_handles([Ref], fun ([Handle]) -> {ok, Handle #handle.offset} end). + +flush(Ref) -> + with_flushed_handles([Ref], fun ([Handle]) -> {ok, [Handle]} end). + +copy(Src, Dest, Count) -> + with_flushed_handles( + [Src, Dest], + fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset }, + DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }] + ) -> + case file:copy(SHdl, DHdl, Count) of + {ok, Count1} = Result1 -> + {Result1, + [SHandle #handle { offset = SOffset + Count1 }, + DHandle #handle { offset = DOffset + Count1, + is_dirty = true }]}; + Error -> + {Error, [SHandle, DHandle]} + end; + (_Handles) -> + {error, incorrect_handle_modes} + end). + +delete(Ref) -> + case erase({Ref, fhc_handle}) of + undefined -> + ok; + Handle = #handle { path = Path } -> + case hard_close(Handle #handle { is_dirty = false, + write_buffer = [] }) of + ok -> file:delete(Path); + {Error, Handle1} -> put_handle(Ref, Handle1), + Error + end + end. + +clear(Ref) -> + with_handles( + [Ref], + fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) -> + ok; + ([Handle]) -> + case maybe_seek(bof, Handle #handle { write_buffer = [], + write_buffer_size = 0 }) of + {{ok, 0}, Handle1 = #handle { hdl = Hdl }} -> + case file:truncate(Hdl) of + ok -> {ok, [Handle1 #handle {trusted_offset = 0, + at_eof = true }]}; + Error -> {Error, [Handle1]} + end; + {{error, _} = Error, Handle1} -> + {Error, [Handle1]} + end + end). + +set_maximum_since_use(MaximumAge) -> + Now = now(), + case lists:foldl( + fun ({{Ref, fhc_handle}, + Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) -> + case Hdl =/= closed andalso + timer:now_diff(Now, Then) >= MaximumAge of + true -> soft_close(Ref, Handle) orelse Rep; + false -> Rep + end; + (_KeyValuePair, Rep) -> + Rep + end, false, get()) of + false -> age_tree_change(), ok; + true -> ok + end. + +obtain() -> + gen_server:call(?SERVER, {obtain, self()}, infinity). + +transfer(Pid) -> + gen_server:cast(?SERVER, {transfer, self(), Pid}). + +set_limit(Limit) -> + gen_server:call(?SERVER, {set_limit, Limit}, infinity). + +get_limit() -> + gen_server:call(?SERVER, get_limit, infinity). + +%%---------------------------------------------------------------------------- +%% Internal functions +%%---------------------------------------------------------------------------- + +is_reader(Mode) -> lists:member(read, Mode). + +is_writer(Mode) -> lists:member(write, Mode). + +append_to_write(Mode) -> + case lists:member(append, Mode) of + true -> [write | Mode -- [append, write]]; + false -> Mode + end. + +with_handles(Refs, Fun) -> + case get_or_reopen([{Ref, reopen} || Ref <- Refs]) of + {ok, Handles} -> + case Fun(Handles) of + {Result, Handles1} when is_list(Handles1) -> + lists:zipwith(fun put_handle/2, Refs, Handles1), + Result; + Result -> + Result + end; + Error -> + Error + end. 
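The distinction the module comment draws between the 'virtual' and the 'real' offset can be seen directly with the two offset functions above; a sketch, again assuming a fresh file:

    {ok, Ref} = file_handle_cache:open("/tmp/fhc_offsets", [write, raw, binary],
                                       [{write_buffer, infinity}]),
    ok      = file_handle_cache:append(Ref, <<"hello">>),
    {ok, 5} = file_handle_cache:current_virtual_offset(Ref), %% where we will be once the buffer is written
    {ok, 0} = file_handle_cache:current_raw_offset(Ref),     %% where the underlying handle still is
    ok      = file_handle_cache:flush(Ref),
    {ok, 5} = file_handle_cache:current_raw_offset(Ref),
    ok      = file_handle_cache:close(Ref).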
+ +with_flushed_handles(Refs, Fun) -> + with_handles( + Refs, + fun (Handles) -> + case lists:foldl( + fun (Handle, {ok, HandlesAcc}) -> + {Res, Handle1} = write_buffer(Handle), + {Res, [Handle1 | HandlesAcc]}; + (Handle, {Error, HandlesAcc}) -> + {Error, [Handle | HandlesAcc]} + end, {ok, []}, Handles) of + {ok, Handles1} -> + Fun(lists:reverse(Handles1)); + {Error, Handles1} -> + {Error, lists:reverse(Handles1)} + end + end). + +get_or_reopen(RefNewOrReopens) -> + case partition_handles(RefNewOrReopens) of + {OpenHdls, []} -> + {ok, [Handle || {_Ref, Handle} <- OpenHdls]}; + {OpenHdls, ClosedHdls} -> + Oldest = oldest(get_age_tree(), fun () -> now() end), + case gen_server:call(?SERVER, {open, self(), length(ClosedHdls), + Oldest}, infinity) of + ok -> + case reopen(ClosedHdls) of + {ok, RefHdls} -> sort_handles(RefNewOrReopens, + OpenHdls, RefHdls, []); + Error -> Error + end; + close -> + [soft_close(Ref, Handle) || + {{Ref, fhc_handle}, Handle = #handle { hdl = Hdl }} <- + get(), + Hdl =/= closed], + get_or_reopen(RefNewOrReopens) + end + end. + +reopen(ClosedHdls) -> reopen(ClosedHdls, get_age_tree(), []). + +reopen([], Tree, RefHdls) -> + put_age_tree(Tree), + {ok, lists:reverse(RefHdls)}; +reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed, + path = Path, + mode = Mode, + offset = Offset, + last_used_at = undefined }} | + RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) -> + case file:open(Path, case NewOrReopen of + new -> Mode; + reopen -> [read | Mode] + end) of + {ok, Hdl} -> + Now = now(), + {{ok, Offset1}, Handle1} = + maybe_seek(Offset, Handle #handle { hdl = Hdl, + offset = 0, + last_used_at = Now }), + Handle2 = Handle1 #handle { trusted_offset = Offset1 }, + put({Ref, fhc_handle}, Handle2), + reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree), + [{Ref, Handle2} | RefHdls]); + Error -> + %% NB: none of the handles in ToOpen are in the age tree + Oldest = oldest(Tree, fun () -> undefined end), + [gen_server:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen], + put_age_tree(Tree), + Error + end. + +partition_handles(RefNewOrReopens) -> + lists:foldr( + fun ({Ref, NewOrReopen}, {Open, Closed}) -> + case get({Ref, fhc_handle}) of + #handle { hdl = closed } = Handle -> + {Open, [{Ref, NewOrReopen, Handle} | Closed]}; + #handle {} = Handle -> + {[{Ref, Handle} | Open], Closed} + end + end, {[], []}, RefNewOrReopens). + +sort_handles([], [], [], Acc) -> + {ok, lists:reverse(Acc)}; +sort_handles([{Ref, _} | RefHdls], [{Ref, Handle} | RefHdlsA], RefHdlsB, Acc) -> + sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]); +sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) -> + sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]). + +put_handle(Ref, Handle = #handle { last_used_at = Then }) -> + Now = now(), + age_tree_update(Then, Now, Ref), + put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }). + +with_age_tree(Fun) -> put_age_tree(Fun(get_age_tree())). + +get_age_tree() -> + case get(fhc_age_tree) of + undefined -> gb_trees:empty(); + AgeTree -> AgeTree + end. + +put_age_tree(Tree) -> put(fhc_age_tree, Tree). + +age_tree_update(Then, Now, Ref) -> + with_age_tree( + fun (Tree) -> + gb_trees:insert(Now, Ref, gb_trees:delete_any(Then, Tree)) + end). + +age_tree_delete(Then) -> + with_age_tree( + fun (Tree) -> + Tree1 = gb_trees:delete_any(Then, Tree), + Oldest = oldest(Tree1, fun () -> undefined end), + gen_server:cast(?SERVER, {close, self(), Oldest}), + Tree1 + end). 
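The age-tree helpers above maintain the per-process gb_tree described in the module comment: keys are last-used timestamps, values are handle refs, so the smallest key always identifies the least recently used handle. In plain gb_trees terms (T1 and T2 being two now() timestamps with T1 earlier than T2):

    Tree = gb_trees:insert(T2, RefB, gb_trees:insert(T1, RefA, gb_trees:empty())),
    {T1, RefA} = gb_trees:smallest(Tree).   %% oldest entry, i.e. the LRU handle

It is this smallest key that gets reported to the server on open and close.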
+ +age_tree_change() -> + with_age_tree( + fun (Tree) -> + case gb_trees:is_empty(Tree) of + true -> Tree; + false -> {Oldest, _Ref} = gb_trees:smallest(Tree), + gen_server:cast(?SERVER, {update, self(), Oldest}) + end, + Tree + end). + +oldest(Tree, DefaultFun) -> + case gb_trees:is_empty(Tree) of + true -> DefaultFun(); + false -> {Oldest, _Ref} = gb_trees:smallest(Tree), + Oldest + end. + +new_closed_handle(Path, Mode, Options) -> + WriteBufferSize = + case proplists:get_value(write_buffer, Options, unbuffered) of + unbuffered -> 0; + infinity -> infinity; + N when is_integer(N) -> N + end, + Ref = make_ref(), + put({Ref, fhc_handle}, #handle { hdl = closed, + offset = 0, + trusted_offset = 0, + is_dirty = false, + write_buffer_size = 0, + write_buffer_size_limit = WriteBufferSize, + write_buffer = [], + at_eof = false, + path = Path, + mode = Mode, + options = Options, + is_write = is_writer(Mode), + is_read = is_reader(Mode), + last_used_at = undefined }), + {ok, Ref}. + +soft_close(Ref, Handle) -> + {Res, Handle1} = soft_close(Handle), + case Res of + ok -> put({Ref, fhc_handle}, Handle1), + true; + _ -> put_handle(Ref, Handle1), + false + end. + +soft_close(Handle = #handle { hdl = closed }) -> + {ok, Handle}; +soft_close(Handle) -> + case write_buffer(Handle) of + {ok, #handle { hdl = Hdl, + offset = Offset, + is_dirty = IsDirty, + last_used_at = Then } = Handle1 } -> + ok = case IsDirty of + true -> file:sync(Hdl); + false -> ok + end, + ok = file:close(Hdl), + age_tree_delete(Then), + {ok, Handle1 #handle { hdl = closed, + trusted_offset = Offset, + is_dirty = false, + last_used_at = undefined }}; + {_Error, _Handle} = Result -> + Result + end. + +hard_close(Handle) -> + case soft_close(Handle) of + {ok, #handle { path = Path, + is_read = IsReader, is_write = IsWriter }} -> + #file { reader_count = RCount, has_writer = HasWriter } = File = + get({Path, fhc_file}), + RCount1 = case IsReader of + true -> RCount - 1; + false -> RCount + end, + HasWriter1 = HasWriter andalso not IsWriter, + case RCount1 =:= 0 andalso not HasWriter1 of + true -> erase({Path, fhc_file}); + false -> put({Path, fhc_file}, + File #file { reader_count = RCount1, + has_writer = HasWriter1 }) + end, + ok; + {_Error, _Handle} = Result -> + Result + end. + +maybe_seek(NewOffset, Handle = #handle { hdl = Hdl, offset = Offset, + at_eof = AtEoF }) -> + {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Offset, NewOffset), + case (case NeedsSeek of + true -> file:position(Hdl, NewOffset); + false -> {ok, Offset} + end) of + {ok, Offset1} = Result -> + {Result, Handle #handle { offset = Offset1, at_eof = AtEoF1 }}; + {error, _} = Error -> + {Error, Handle} + end. 
+ +needs_seek( AtEoF, _CurOffset, cur ) -> {AtEoF, false}; +needs_seek( AtEoF, _CurOffset, {cur, 0}) -> {AtEoF, false}; +needs_seek( true, _CurOffset, eof ) -> {true , false}; +needs_seek( true, _CurOffset, {eof, 0}) -> {true , false}; +needs_seek( false, _CurOffset, eof ) -> {true , true }; +needs_seek( false, _CurOffset, {eof, 0}) -> {true , true }; +needs_seek( AtEoF, 0, bof ) -> {AtEoF, false}; +needs_seek( AtEoF, 0, {bof, 0}) -> {AtEoF, false}; +needs_seek( AtEoF, CurOffset, CurOffset) -> {AtEoF, false}; +needs_seek( true, CurOffset, {bof, DesiredOffset}) + when DesiredOffset >= CurOffset -> + {true, true}; +needs_seek( true, _CurOffset, {cur, DesiredOffset}) + when DesiredOffset > 0 -> + {true, true}; +needs_seek( true, CurOffset, DesiredOffset) %% same as {bof, DO} + when is_integer(DesiredOffset) andalso DesiredOffset >= CurOffset -> + {true, true}; +%% because we can't really track size, we could well end up at EoF and not know +needs_seek(_AtEoF, _CurOffset, _DesiredOffset) -> + {false, true}. + +write_buffer(Handle = #handle { write_buffer = [] }) -> + {ok, Handle}; +write_buffer(Handle = #handle { hdl = Hdl, offset = Offset, + write_buffer = WriteBuffer, + write_buffer_size = DataSize, + at_eof = true }) -> + case file:write(Hdl, lists:reverse(WriteBuffer)) of + ok -> + Offset1 = Offset + DataSize, + {ok, Handle #handle { offset = Offset1, is_dirty = true, + write_buffer = [], write_buffer_size = 0 }}; + {error, _} = Error -> + {Error, Handle} + end. + +%%---------------------------------------------------------------------------- +%% gen_server callbacks +%%---------------------------------------------------------------------------- + +init([]) -> + Limit = case application:get_env(file_handles_high_watermark) of + {ok, Watermark} when (is_integer(Watermark) andalso + Watermark > 0) -> + Watermark; + _ -> + case ulimit() of + infinity -> infinity; + unknown -> ?FILE_HANDLES_LIMIT_OTHER; + Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS]) + end + end, + ObtainLimit = obtain_limit(Limit), + error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n", + [Limit, ObtainLimit]), + Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]), + {ok, #fhc_state { elders = dict:new(), + limit = Limit, + open_count = 0, + open_pending = pending_new(), + obtain_limit = ObtainLimit, + obtain_count = 0, + obtain_pending = pending_new(), + clients = Clients, + timer_ref = undefined }}. 
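init/1 above sizes the cache from the file_handles_high_watermark application environment key when it is set to a positive integer, and only falls back to probing ulimit (or the Windows/unknown defaults) otherwise. Assuming the enclosing application is rabbit, as it is in the server proper, an Erlang-terms config file could pin the limit explicitly; the figure is arbitrary:

    %% e.g. in the configuration file read at boot
    [{rabbit, [{file_handles_high_watermark, 1024}]}].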
+ +handle_call({open, Pid, Requested, EldestUnusedSince}, From, + State = #fhc_state { open_count = Count, + open_pending = Pending, + elders = Elders, + clients = Clients }) + when EldestUnusedSince =/= undefined -> + Elders1 = dict:store(Pid, EldestUnusedSince, Elders), + Item = #pending { kind = open, + pid = Pid, + requested = Requested, + from = From }, + ok = track_client(Pid, Clients), + State1 = State #fhc_state { elders = Elders1 }, + case needs_reduce(State1 #fhc_state { open_count = Count + Requested }) of + true -> case ets:lookup(Clients, Pid) of + [#cstate { opened = 0 }] -> + true = ets:update_element( + Clients, Pid, {#cstate.blocked, true}), + {noreply, + reduce(State1 #fhc_state { + open_pending = pending_in(Item, Pending) })}; + [#cstate { opened = Opened }] -> + true = ets:update_element( + Clients, Pid, + {#cstate.pending_closes, Opened}), + {reply, close, State1} + end; + false -> {noreply, run_pending_item(Item, State1)} + end; + +handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, + obtain_count = Count, + obtain_pending = Pending, + clients = Clients }) + when Limit =/= infinity andalso Count >= Limit -> + ok = track_client(Pid, Clients), + true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), + Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, + {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; +handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, + obtain_pending = Pending, + clients = Clients }) -> + Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, + ok = track_client(Pid, Clients), + case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of + true -> + true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), + {noreply, reduce(State #fhc_state { + obtain_pending = pending_in(Item, Pending) })}; + false -> + {noreply, run_pending_item(Item, State)} + end; +handle_call({set_limit, Limit}, _From, State) -> + {reply, ok, maybe_reduce( + process_pending(State #fhc_state { + limit = Limit, + obtain_limit = obtain_limit(Limit) }))}; +handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) -> + {reply, Limit, State}. + +handle_cast({register_callback, Pid, MFA}, + State = #fhc_state { clients = Clients }) -> + ok = track_client(Pid, Clients), + true = ets:update_element(Clients, Pid, {#cstate.callback, MFA}), + {noreply, State}; + +handle_cast({update, Pid, EldestUnusedSince}, + State = #fhc_state { elders = Elders }) + when EldestUnusedSince =/= undefined -> + Elders1 = dict:store(Pid, EldestUnusedSince, Elders), + %% don't call maybe_reduce from here otherwise we can create a + %% storm of messages + {noreply, State #fhc_state { elders = Elders1 }}; + +handle_cast({close, Pid, EldestUnusedSince}, + State = #fhc_state { elders = Elders, clients = Clients }) -> + Elders1 = case EldestUnusedSince of + undefined -> dict:erase(Pid, Elders); + _ -> dict:store(Pid, EldestUnusedSince, Elders) + end, + ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}), + {noreply, process_pending( + update_counts(open, Pid, -1, + State #fhc_state { elders = Elders1 }))}; + +handle_cast({transfer, FromPid, ToPid}, State) -> + ok = track_client(ToPid, State#fhc_state.clients), + {noreply, process_pending( + update_counts(obtain, ToPid, +1, + update_counts(obtain, FromPid, -1, State)))}; + +handle_cast(check_counts, State) -> + {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })}. 
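The set_limit and get_limit clauses above make the watermark adjustable while the server is running. Assuming the controller is registered locally under its module name, file_handle_cache (the usual ?SERVER convention; the define sits outside this hunk), a shell session might look like the following, with 2048 an arbitrary figure:

    1> gen_server:call(file_handle_cache, {set_limit, 2048}, infinity).
    ok
    2> gen_server:call(file_handle_cache, get_limit, infinity).
    2048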
+ +handle_info({'DOWN', _MRef, process, Pid, _Reason}, + State = #fhc_state { elders = Elders, + open_count = OpenCount, + open_pending = OpenPending, + obtain_count = ObtainCount, + obtain_pending = ObtainPending, + clients = Clients }) -> + [#cstate { opened = Opened, obtained = Obtained }] = + ets:lookup(Clients, Pid), + true = ets:delete(Clients, Pid), + FilterFun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end, + {noreply, process_pending( + State #fhc_state { + open_count = OpenCount - Opened, + open_pending = filter_pending(FilterFun, OpenPending), + obtain_count = ObtainCount - Obtained, + obtain_pending = filter_pending(FilterFun, ObtainPending), + elders = dict:erase(Pid, Elders) })}. + +terminate(_Reason, State = #fhc_state { clients = Clients }) -> + ets:delete(Clients), + State. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%---------------------------------------------------------------------------- +%% pending queue abstraction helpers +%%---------------------------------------------------------------------------- + +queue_fold(Fun, Init, Q) -> + case queue:out(Q) of + {empty, _Q} -> Init; + {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1) + end. + +filter_pending(Fun, {Count, Queue}) -> + {Delta, Queue1} = + queue_fold(fun (Item, {DeltaN, QueueN}) -> + case Fun(Item) of + true -> {DeltaN, queue:in(Item, QueueN)}; + false -> {DeltaN - requested(Item), QueueN} + end + end, {0, queue:new()}, Queue), + {Count + Delta, Queue1}. + +pending_new() -> + {0, queue:new()}. + +pending_in(Item = #pending { requested = Requested }, {Count, Queue}) -> + {Count + Requested, queue:in(Item, Queue)}. + +pending_out({0, _Queue} = Pending) -> + {empty, Pending}; +pending_out({N, Queue}) -> + {{value, #pending { requested = Requested }} = Result, Queue1} = + queue:out(Queue), + {Result, {N - Requested, Queue1}}. + +pending_count({Count, _Queue}) -> + Count. + +pending_is_empty({0, _Queue}) -> + true; +pending_is_empty({_N, _Queue}) -> + false. + +%%---------------------------------------------------------------------------- +%% server helpers +%%---------------------------------------------------------------------------- + +obtain_limit(infinity) -> infinity; +obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of + OLimit when OLimit < 0 -> 0; + OLimit -> OLimit + end. + +requested({_Kind, _Pid, Requested, _From}) -> + Requested. + +process_pending(State = #fhc_state { limit = infinity }) -> + State; +process_pending(State) -> + process_open(process_obtain(State)). + +process_open(State = #fhc_state { limit = Limit, + open_pending = Pending, + open_count = OpenCount, + obtain_count = ObtainCount }) -> + {Pending1, State1} = + process_pending(Pending, Limit - (ObtainCount + OpenCount), State), + State1 #fhc_state { open_pending = Pending1 }. + +process_obtain(State = #fhc_state { limit = Limit, + obtain_pending = Pending, + obtain_limit = ObtainLimit, + obtain_count = ObtainCount, + open_count = OpenCount }) -> + Quota = lists:min([ObtainLimit - ObtainCount, + Limit - (ObtainCount + OpenCount)]), + {Pending1, State1} = process_pending(Pending, Quota, State), + State1 #fhc_state { obtain_pending = Pending1 }. 
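The pending-queue helpers above pair a standard queue with a running total of the handles its queued requests ask for, so needs_reduce and the quota calculations can inspect the total without walking the queue. The same counted-queue idea in isolation (names are illustrative):

    cq_new() -> {0, queue:new()}.

    cq_in(Item, Weight, {Total, Q}) ->
        {Total + Weight, queue:in({Item, Weight}, Q)}.

    cq_out({0, _Q} = CQ) ->
        {empty, CQ};
    cq_out({Total, Q}) ->
        {{value, {_Item, Weight}} = Res, Q1} = queue:out(Q),
        {Res, {Total - Weight, Q1}}.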
+ +process_pending(Pending, Quota, State) when Quota =< 0 -> + {Pending, State}; +process_pending(Pending, Quota, State) -> + case pending_out(Pending) of + {empty, _Pending} -> + {Pending, State}; + {{value, #pending { requested = Requested }}, _Pending1} + when Requested > Quota -> + {Pending, State}; + {{value, #pending { requested = Requested } = Item}, Pending1} -> + process_pending(Pending1, Quota - Requested, + run_pending_item(Item, State)) + end. + +run_pending_item(#pending { kind = Kind, + pid = Pid, + requested = Requested, + from = From }, + State = #fhc_state { clients = Clients }) -> + gen_server:reply(From, ok), + true = ets:update_element(Clients, Pid, {#cstate.blocked, false}), + update_counts(Kind, Pid, Requested, State). + +update_counts(Kind, Pid, Delta, + State = #fhc_state { open_count = OpenCount, + obtain_count = ObtainCount, + clients = Clients }) -> + {OpenDelta, ObtainDelta} = update_counts1(Kind, Pid, Delta, Clients), + State #fhc_state { open_count = OpenCount + OpenDelta, + obtain_count = ObtainCount + ObtainDelta }. + +update_counts1(open, Pid, Delta, Clients) -> + ets:update_counter(Clients, Pid, {#cstate.opened, Delta}), + {Delta, 0}; +update_counts1(obtain, Pid, Delta, Clients) -> + ets:update_counter(Clients, Pid, {#cstate.obtained, Delta}), + {0, Delta}. + +maybe_reduce(State) -> + case needs_reduce(State) of + true -> reduce(State); + false -> State + end. + +needs_reduce(#fhc_state { limit = Limit, + open_count = OpenCount, + open_pending = OpenPending, + obtain_count = ObtainCount, + obtain_limit = ObtainLimit, + obtain_pending = ObtainPending }) -> + Limit =/= infinity + andalso ((OpenCount + ObtainCount > Limit) + orelse (not pending_is_empty(OpenPending)) + orelse (ObtainCount < ObtainLimit + andalso not pending_is_empty(ObtainPending))). + +reduce(State = #fhc_state { open_pending = OpenPending, + obtain_pending = ObtainPending, + elders = Elders, + clients = Clients, + timer_ref = TRef }) -> + Now = now(), + {CStates, Sum, ClientCount} = + dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = Accs) -> + [#cstate { pending_closes = PendingCloses, + opened = Opened, + blocked = Blocked } = CState] = + ets:lookup(Clients, Pid), + case Blocked orelse PendingCloses =:= Opened of + true -> Accs; + false -> {[CState | CStatesAcc], + SumAcc + timer:now_diff(Now, Eldest), + CountAcc + 1} + end + end, {[], 0, 0}, Elders), + case CStates of + [] -> ok; + _ -> case (Sum / ClientCount) - + (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of + AverageAge when AverageAge > 0 -> + notify_age(CStates, AverageAge); + _ -> + notify_age0(Clients, CStates, + pending_count(OpenPending) + + pending_count(ObtainPending)) + end + end, + case TRef of + undefined -> {ok, TRef1} = timer:apply_after( + ?FILE_HANDLES_CHECK_INTERVAL, + gen_server, cast, [?SERVER, check_counts]), + State #fhc_state { timer_ref = TRef1 }; + _ -> State + end. + +notify_age(CStates, AverageAge) -> + lists:foreach( + fun (#cstate { callback = undefined }) -> ok; + (#cstate { callback = {M, F, A} }) -> apply(M, F, A ++ [AverageAge]) + end, CStates). + +notify_age0(Clients, CStates, Required) -> + Notifications = + [CState || CState <- CStates, CState#cstate.callback =/= undefined], + {L1, L2} = lists:split(random:uniform(length(Notifications)), + Notifications), + notify(Clients, Required, L2 ++ L1). 
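reduce/1 and notify_age/2 above are how the controller asks clients to give handles back: each registered callback {M, F, A} is invoked as apply(M, F, A ++ [Age]), where Age is an age in microseconds (the client is expected to release handles unused for at least that long) or 0 when the server simply needs handles returned now. A client-side callback might therefore be shaped like this; the module, function and message names are invented for illustration:

    -module(my_fhc_client).
    -export([age_notification/2]).

    %% Registered elsewhere as {my_fhc_client, age_notification, [OwnerPid]};
    %% the controller appends Age as the final argument.
    age_notification(OwnerPid, Age) ->
        OwnerPid ! {close_handles_unused_for, Age},
        ok.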
+ +notify(_Clients, _Required, []) -> + ok; +notify(_Clients, Required, _Notifications) when Required =< 0 -> + ok; +notify(Clients, Required, [#cstate{ pid = Pid, + callback = {M, F, A}, + opened = Opened } | Notifications]) -> + apply(M, F, A ++ [0]), + ets:update_element(Clients, Pid, {#cstate.pending_closes, Opened}), + notify(Clients, Required - Opened, Notifications). + +track_client(Pid, Clients) -> + case ets:insert_new(Clients, #cstate { pid = Pid, + callback = undefined, + opened = 0, + obtained = 0, + blocked = false, + pending_closes = 0 }) of + true -> _MRef = erlang:monitor(process, Pid), + ok; + false -> ok + end. + +%% For all unices, assume ulimit exists. Further googling suggests +%% that BSDs (incl OS X), solaris and linux all agree that ulimit -n +%% is file handles +ulimit() -> + case os:type() of + {win32, _OsName} -> + ?FILE_HANDLES_LIMIT_WINDOWS; + {unix, _OsName} -> + %% Under Linux, Solaris and FreeBSD, ulimit is a shell + %% builtin, not a command. In OS X and AIX it's a command. + %% Fortunately, os:cmd invokes the cmd in a shell env, so + %% we're safe in all cases. + case os:cmd("ulimit -n") of + "unlimited" -> + infinity; + String = [C|_] when $0 =< C andalso C =< $9 -> + list_to_integer( + lists:takewhile( + fun (D) -> $0 =< D andalso D =< $9 end, String)); + _ -> + %% probably a variant of + %% "/bin/sh: line 1: ulimit: command not found\n" + unknown + end; + _ -> + unknown + end. diff -Nru rabbitmq-server-1.7.2/src/gatherer.erl rabbitmq-server-2.3.1/src/gatherer.erl --- rabbitmq-server-1.7.2/src/gatherer.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/gatherer.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,130 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gatherer). + +-behaviour(gen_server2). + +-export([start_link/0, stop/1, fork/1, finish/1, in/2, out/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). +-spec(stop/1 :: (pid()) -> 'ok'). +-spec(fork/1 :: (pid()) -> 'ok'). +-spec(finish/1 :: (pid()) -> 'ok'). +-spec(in/2 :: (pid(), any()) -> 'ok'). +-spec(out/1 :: (pid()) -> {'value', any()} | 'empty'). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). + +%%---------------------------------------------------------------------------- + +-record(gstate, { forks, values, blocked }). + +%%---------------------------------------------------------------------------- + +start_link() -> + gen_server2:start_link(?MODULE, [], [{timeout, infinity}]). + +stop(Pid) -> + gen_server2:call(Pid, stop, infinity). + +fork(Pid) -> + gen_server2:call(Pid, fork, infinity). 
+ +finish(Pid) -> + gen_server2:cast(Pid, finish). + +in(Pid, Value) -> + gen_server2:cast(Pid, {in, Value}). + +out(Pid) -> + gen_server2:call(Pid, out, infinity). + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, #gstate { forks = 0, values = queue:new(), blocked = queue:new() }, + hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +handle_call(stop, _From, State) -> + {stop, normal, ok, State}; + +handle_call(fork, _From, State = #gstate { forks = Forks }) -> + {reply, ok, State #gstate { forks = Forks + 1 }, hibernate}; + +handle_call(out, From, State = #gstate { forks = Forks, + values = Values, + blocked = Blocked }) -> + case queue:out(Values) of + {empty, _} -> + case Forks of + 0 -> {reply, empty, State, hibernate}; + _ -> {noreply, + State #gstate { blocked = queue:in(From, Blocked) }, + hibernate} + end; + {{value, _Value} = V, NewValues} -> + {reply, V, State #gstate { values = NewValues }, hibernate} + end; + +handle_call(Msg, _From, State) -> + {stop, {unexpected_call, Msg}, State}. + +handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) -> + NewForks = Forks - 1, + NewBlocked = case NewForks of + 0 -> [gen_server2:reply(From, empty) || + From <- queue:to_list(Blocked)], + queue:new(); + _ -> Blocked + end, + {noreply, State #gstate { forks = NewForks, blocked = NewBlocked }, + hibernate}; + +handle_cast({in, Value}, State = #gstate { values = Values, + blocked = Blocked }) -> + {noreply, case queue:out(Blocked) of + {empty, _} -> + State #gstate { values = queue:in(Value, Values) }; + {{value, From}, NewBlocked} -> + gen_server2:reply(From, {value, Value}), + State #gstate { blocked = NewBlocked } + end, hibernate}; + +handle_cast(Msg, State) -> + {stop, {unexpected_cast, Msg}, State}. + +handle_info(Msg, State) -> + {stop, {unexpected_info, Msg}, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +terminate(_Reason, State) -> + State. diff -Nru rabbitmq-server-1.7.2/src/gen_server2.erl rabbitmq-server-2.3.1/src/gen_server2.erl --- rabbitmq-server-1.7.2/src/gen_server2.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/gen_server2.erl 2011-02-03 12:47:35.000000000 +0000 @@ -16,10 +16,12 @@ %% The original code could reorder messages when communicating with a %% process on a remote node that was not currently connected. %% -%% 4) The new functions gen_server2:pcall/3, pcall/4, and pcast/3 -%% allow callers to attach priorities to requests. Requests with -%% higher priorities are processed before requests with lower -%% priorities. The default priority is 0. +%% 4) The callback module can optionally implement prioritise_call/3, +%% prioritise_cast/2 and prioritise_info/2. These functions take +%% Message, From and State or just Message and State and return a +%% single integer representing the priority attached to the message. +%% Messages with higher priorities are processed before requests with +%% lower priorities. The default priority is 0. %% %% 5) The callback module can optionally implement %% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be @@ -57,23 +59,23 @@ %% being used. Instead it'll wait for the current timeout as described %% above. -%% All modifications are (C) 2009-2010 LShift Ltd. +%% All modifications are (C) 2009-2011 VMware, Inc. 
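Stepping back to the gatherer module above: it is a small fan-in coordinator in which each producer is announced with fork/1, pushes results with in/2 and signals completion with finish/1, while a consumer calling out/1 either receives a value, blocks while forks are still outstanding, or gets empty once every fork has finished and the queue is drained. A sketch of typical use, in which everything apart from the gatherer calls is invented for illustration:

    gather_all(SourceFuns) ->
        {ok, Gatherer} = gatherer:start_link(),
        [begin
             ok = gatherer:fork(Gatherer),
             spawn_link(fun () ->
                                [gatherer:in(Gatherer, R) || R <- Fun()],
                                ok = gatherer:finish(Gatherer)
                        end)
         end || Fun <- SourceFuns],
        collect(Gatherer, []).

    collect(Gatherer, Acc) ->
        case gatherer:out(Gatherer) of
            empty          -> ok = gatherer:stop(Gatherer),
                              lists:reverse(Acc);
            {value, Value} -> collect(Gatherer, [Value | Acc])
        end.

Because in/2 and finish/1 are casts sent from the same producer process, their ordering is preserved, so a producer cannot have its values dropped by finishing "too early".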
%% ``The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in %% compliance with the License. You should have received a copy of the %% Erlang Public License along with this software. If not, it can be %% retrieved via the world wide web at http://www.erlang.org/. -%% +%% %% Software distributed under the License is distributed on an "AS IS" %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See %% the License for the specific language governing rights and limitations %% under the License. -%% +%% %% The Initial Developer of the Original Code is Ericsson Utvecklings AB. %% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings %% AB. All Rights Reserved.'' -%% +%% %% $Id$ %% -module(gen_server2). @@ -82,13 +84,13 @@ %%% %%% The idea behind THIS server is that the user module %%% provides (different) functions to handle different -%%% kind of inputs. +%%% kind of inputs. %%% If the Parent process terminates the Module:terminate/2 %%% function is called. %%% %%% The user module should export: %%% -%%% init(Args) +%%% init(Args) %%% ==> {ok, State} %%% {ok, State, Timeout} %%% {ok, State, Timeout, Backoff} @@ -101,21 +103,21 @@ %%% {reply, Reply, State, Timeout} %%% {noreply, State} %%% {noreply, State, Timeout} -%%% {stop, Reason, Reply, State} +%%% {stop, Reason, Reply, State} %%% Reason = normal | shutdown | Term terminate(State) is called %%% %%% handle_cast(Msg, State) %%% %%% ==> {noreply, State} %%% {noreply, State, Timeout} -%%% {stop, Reason, State} +%%% {stop, Reason, State} %%% Reason = normal | shutdown | Term terminate(State) is called %%% %%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ... %%% %%% ==> {noreply, State} %%% {noreply, State, Timeout} -%%% {stop, Reason, State} +%%% {stop, Reason, State} %%% Reason = normal | shutdown | Term, terminate(State) is called %%% %%% terminate(Reason, State) Let the user module clean up @@ -159,37 +161,44 @@ %% API -export([start/3, start/4, - start_link/3, start_link/4, - call/2, call/3, pcall/3, pcall/4, - cast/2, pcast/3, reply/2, - abcast/2, abcast/3, - multi_call/2, multi_call/3, multi_call/4, - enter_loop/3, enter_loop/4, enter_loop/5, wake_hib/7]). + start_link/3, start_link/4, + call/2, call/3, + cast/2, reply/2, + abcast/2, abcast/3, + multi_call/2, multi_call/3, multi_call/4, + enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]). -export([behaviour_info/1]). %% System exports -export([system_continue/3, - system_terminate/4, - system_code_change/4, - format_status/2]). + system_terminate/4, + system_code_change/4, + format_status/2]). %% Internal exports --export([init_it/6, print_event/3]). +-export([init_it/6]). -import(error_logger, [format/2]). +%% State record +-record(gs2_state, {parent, name, state, mod, time, + timeout_state, queue, debug, prioritise_call, + prioritise_cast, prioritise_info}). + %%%========================================================================= %%% Specs. These exist only to shut up dialyzer's warnings %%%========================================================================= -ifdef(use_specs). --spec(handle_common_termination/6 :: - (any(), any(), any(), atom(), any(), any()) -> no_return()). +-type(gs2_state() :: #gs2_state{}). --spec(hibernate/7 :: - (pid(), any(), any(), atom(), any(), queue(), any()) -> no_return()). +-spec(handle_common_termination/3 :: + (any(), atom(), gs2_state()) -> no_return()). 
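Alongside the new gs2_state record, the module documentation above lists {ok, State, Timeout, Backoff} as a return from init/1; judging by adjust_timeout_state further down, the Backoff 4-tuple carries the current/initial timeout, the minimum timeout and the desired hibernation period, all in milliseconds, and drives gen_server2's adaptive hibernation. A minimal fragment of a callback module opting in, mirroring what gatherer does above (the values are arbitrary):

    init([]) ->
        %% Start with a 1s timeout, never back off below 1s, and aim for
        %% roughly 10s of useful hibernation between bursts of activity.
        {ok, no_state, hibernate, {backoff, 1000, 1000, 10000}}.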
+-spec(hibernate/1 :: (gs2_state()) -> no_return()). +-spec(pre_hibernate/1 :: (gs2_state()) -> no_return()). +-spec(system_terminate/4 :: (_, _, _, gs2_state()) -> no_return()). -endif. @@ -238,37 +247,21 @@ %% be monitored. %% If the client is trapping exits and is linked server termination %% is handled here (? Shall we do that here (or rely on timeouts) ?). -%% ----------------------------------------------------------------- +%% ----------------------------------------------------------------- call(Name, Request) -> case catch gen:call(Name, '$gen_call', Request) of - {ok,Res} -> - Res; - {'EXIT',Reason} -> - exit({Reason, {?MODULE, call, [Name, Request]}}) + {ok,Res} -> + Res; + {'EXIT',Reason} -> + exit({Reason, {?MODULE, call, [Name, Request]}}) end. call(Name, Request, Timeout) -> case catch gen:call(Name, '$gen_call', Request, Timeout) of - {ok,Res} -> - Res; - {'EXIT',Reason} -> - exit({Reason, {?MODULE, call, [Name, Request, Timeout]}}) - end. - -pcall(Name, Priority, Request) -> - case catch gen:call(Name, '$gen_pcall', {Priority, Request}) of - {ok,Res} -> - Res; - {'EXIT',Reason} -> - exit({Reason, {?MODULE, pcall, [Name, Priority, Request]}}) - end. - -pcall(Name, Priority, Request, Timeout) -> - case catch gen:call(Name, '$gen_pcall', {Priority, Request}, Timeout) of - {ok,Res} -> - Res; - {'EXIT',Reason} -> - exit({Reason, {?MODULE, pcall, [Name, Priority, Request, Timeout]}}) + {ok,Res} -> + Res; + {'EXIT',Reason} -> + exit({Reason, {?MODULE, call, [Name, Request, Timeout]}}) end. %% ----------------------------------------------------------------- @@ -277,34 +270,18 @@ cast({global,Name}, Request) -> catch global:send(Name, cast_msg(Request)), ok; -cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) -> +cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) -> do_cast(Dest, Request); cast(Dest, Request) when is_atom(Dest) -> do_cast(Dest, Request); cast(Dest, Request) when is_pid(Dest) -> do_cast(Dest, Request). -do_cast(Dest, Request) -> +do_cast(Dest, Request) -> do_send(Dest, cast_msg(Request)), ok. - -cast_msg(Request) -> {'$gen_cast',Request}. -pcast({global,Name}, Priority, Request) -> - catch global:send(Name, cast_msg(Priority, Request)), - ok; -pcast({Name,Node}=Dest, Priority, Request) when is_atom(Name), is_atom(Node) -> - do_cast(Dest, Priority, Request); -pcast(Dest, Priority, Request) when is_atom(Dest) -> - do_cast(Dest, Priority, Request); -pcast(Dest, Priority, Request) when is_pid(Dest) -> - do_cast(Dest, Priority, Request). - -do_cast(Dest, Priority, Request) -> - do_send(Dest, cast_msg(Priority, Request)), - ok. - -cast_msg(Priority, Request) -> {'$gen_pcast', {Priority, Request}}. +cast_msg(Request) -> {'$gen_cast',Request}. %% ----------------------------------------------------------------- %% Send a reply to the client. @@ -312,9 +289,9 @@ reply({To, Tag}, Reply) -> catch To ! {Tag, Reply}. -%% ----------------------------------------------------------------- -%% Asyncronous broadcast, returns nothing, it's just send'n prey -%%----------------------------------------------------------------- +%% ----------------------------------------------------------------- +%% Asyncronous broadcast, returns nothing, it's just send'n pray +%% ----------------------------------------------------------------- abcast(Name, Request) when is_atom(Name) -> do_abcast([node() | nodes()], Name, cast_msg(Request)). @@ -330,36 +307,36 @@ %%% Make a call to servers at several nodes. 
%%% Returns: {[Replies],[BadNodes]} %%% A Timeout can be given -%%% +%%% %%% A middleman process is used in case late answers arrives after %%% the timeout. If they would be allowed to glog the callers message -%%% queue, it would probably become confused. Late answers will +%%% queue, it would probably become confused. Late answers will %%% now arrive to the terminated middleman and so be discarded. %%% ----------------------------------------------------------------- multi_call(Name, Req) when is_atom(Name) -> do_multi_call([node() | nodes()], Name, Req, infinity). -multi_call(Nodes, Name, Req) +multi_call(Nodes, Name, Req) when is_list(Nodes), is_atom(Name) -> do_multi_call(Nodes, Name, Req, infinity). multi_call(Nodes, Name, Req, infinity) -> do_multi_call(Nodes, Name, Req, infinity); -multi_call(Nodes, Name, Req, Timeout) +multi_call(Nodes, Name, Req, Timeout) when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 -> do_multi_call(Nodes, Name, Req, Timeout). %%----------------------------------------------------------------- -%% enter_loop(Mod, Options, State, , , ) ->_ -%% -%% Description: Makes an existing process into a gen_server. -%% The calling process will enter the gen_server receive +%% enter_loop(Mod, Options, State, , , ) ->_ +%% +%% Description: Makes an existing process into a gen_server. +%% The calling process will enter the gen_server receive %% loop and become a gen_server process. -%% The process *must* have been started using one of the -%% start functions in proc_lib, see proc_lib(3). -%% The user is responsible for any initialization of the +%% The process *must* have been started using one of the +%% start functions in proc_lib, see proc_lib(3). +%% The user is responsible for any initialization of the %% process, including registering a name for it. %%----------------------------------------------------------------- enter_loop(Mod, Options, State) -> @@ -386,7 +363,10 @@ Debug = debug_options(Name, Options), Queue = priority_queue:new(), Backoff1 = extend_backoff(Backoff), - loop(Parent, Name, State, Mod, Timeout, Backoff1, Queue, Debug). + loop(find_prioritisers( + #gs2_state { parent = Parent, name = Name, state = State, + mod = Mod, time = Timeout, timeout_state = Backoff1, + queue = Queue, debug = Debug })). 
%%%======================================================================== %%% Gen-callback functions @@ -405,45 +385,57 @@ Name = name(Name0), Debug = debug_options(Name, Options), Queue = priority_queue:new(), + GS2State = find_prioritisers( + #gs2_state { parent = Parent, + name = Name, + mod = Mod, + queue = Queue, + debug = Debug }), case catch Mod:init(Args) of - {ok, State} -> - proc_lib:init_ack(Starter, {ok, self()}), - loop(Parent, Name, State, Mod, infinity, undefined, Queue, Debug); - {ok, State, Timeout} -> - proc_lib:init_ack(Starter, {ok, self()}), - loop(Parent, Name, State, Mod, Timeout, undefined, Queue, Debug); - {ok, State, Timeout, Backoff = {backoff, _, _, _}} -> + {ok, State} -> + proc_lib:init_ack(Starter, {ok, self()}), + loop(GS2State #gs2_state { state = State, + time = infinity, + timeout_state = undefined }); + {ok, State, Timeout} -> + proc_lib:init_ack(Starter, {ok, self()}), + loop(GS2State #gs2_state { state = State, + time = Timeout, + timeout_state = undefined }); + {ok, State, Timeout, Backoff = {backoff, _, _, _}} -> Backoff1 = extend_backoff(Backoff), - proc_lib:init_ack(Starter, {ok, self()}), - loop(Parent, Name, State, Mod, Timeout, Backoff1, Queue, Debug); - {stop, Reason} -> - %% For consistency, we must make sure that the - %% registered name (if any) is unregistered before - %% the parent process is notified about the failure. - %% (Otherwise, the parent process could get - %% an 'already_started' error if it immediately - %% tried starting the process again.) - unregister_name(Name0), - proc_lib:init_ack(Starter, {error, Reason}), - exit(Reason); - ignore -> - unregister_name(Name0), - proc_lib:init_ack(Starter, ignore), - exit(normal); - {'EXIT', Reason} -> - unregister_name(Name0), - proc_lib:init_ack(Starter, {error, Reason}), - exit(Reason); - Else -> - Error = {bad_return_value, Else}, - proc_lib:init_ack(Starter, {error, Error}), - exit(Error) + proc_lib:init_ack(Starter, {ok, self()}), + loop(GS2State #gs2_state { state = State, + time = Timeout, + timeout_state = Backoff1 }); + {stop, Reason} -> + %% For consistency, we must make sure that the + %% registered name (if any) is unregistered before + %% the parent process is notified about the failure. + %% (Otherwise, the parent process could get + %% an 'already_started' error if it immediately + %% tried starting the process again.) + unregister_name(Name0), + proc_lib:init_ack(Starter, {error, Reason}), + exit(Reason); + ignore -> + unregister_name(Name0), + proc_lib:init_ack(Starter, ignore), + exit(normal); + {'EXIT', Reason} -> + unregister_name(Name0), + proc_lib:init_ack(Starter, {error, Reason}), + exit(Reason); + Else -> + Error = {bad_return_value, Else}, + proc_lib:init_ack(Starter, {error, Error}), + exit(Error) end. name({local,Name}) -> Name; name({global,Name}) -> Name; %% name(Pid) when is_pid(Pid) -> Pid; -%% when R11 goes away, drop the line beneath and uncomment the line above +%% when R12 goes away, drop the line beneath and uncomment the line above name(Name) -> Name. unregister_name({local,Name}) -> @@ -467,23 +459,24 @@ %%% --------------------------------------------------- %%% The MAIN loop. %%% --------------------------------------------------- -loop(Parent, Name, State, Mod, hibernate, undefined, Queue, Debug) -> - pre_hibernate(Parent, Name, State, Mod, undefined, Queue, Debug); -loop(Parent, Name, State, Mod, Time, TimeoutState, Queue, Debug) -> - process_next_msg(Parent, Name, State, Mod, Time, TimeoutState, - drain(Queue), Debug). 
+loop(GS2State = #gs2_state { time = hibernate, + timeout_state = undefined }) -> + pre_hibernate(GS2State); +loop(GS2State) -> + process_next_msg(drain(GS2State)). -drain(Queue) -> +drain(GS2State) -> receive - Input -> drain(in(Input, Queue)) - after 0 -> Queue + Input -> drain(in(Input, GS2State)) + after 0 -> GS2State end. -process_next_msg(Parent, Name, State, Mod, Time, TimeoutState, Queue, Debug) -> +process_next_msg(GS2State = #gs2_state { time = Time, + timeout_state = TimeoutState, + queue = Queue }) -> case priority_queue:out(Queue) of {{value, Msg}, Queue1} -> - process_msg(Parent, Name, State, Mod, - Time, TimeoutState, Queue1, Debug, Msg); + process_msg(Msg, GS2State #gs2_state { queue = Queue1 }); {empty, Queue1} -> {Time1, HibOnTimeout} = case {Time, TimeoutState} of @@ -504,68 +497,64 @@ Input -> %% Time could be 'hibernate' here, so *don't* call loop process_next_msg( - Parent, Name, State, Mod, Time, TimeoutState, - drain(in(Input, Queue1)), Debug) + drain(in(Input, GS2State #gs2_state { queue = Queue1 }))) after Time1 -> case HibOnTimeout of true -> pre_hibernate( - Parent, Name, State, Mod, TimeoutState, Queue1, - Debug); + GS2State #gs2_state { queue = Queue1 }); false -> - process_msg( - Parent, Name, State, Mod, Time, TimeoutState, - Queue1, Debug, timeout) + process_msg(timeout, + GS2State #gs2_state { queue = Queue1 }) end end end. -wake_hib(Parent, Name, State, Mod, TS, Queue, Debug) -> +wake_hib(GS2State = #gs2_state { timeout_state = TS }) -> TimeoutState1 = case TS of undefined -> undefined; {SleptAt, TimeoutState} -> adjust_timeout_state(SleptAt, now(), TimeoutState) end, - post_hibernate(Parent, Name, State, Mod, TimeoutState1, - drain(Queue), Debug). + post_hibernate( + drain(GS2State #gs2_state { timeout_state = TimeoutState1 })). -hibernate(Parent, Name, State, Mod, TimeoutState, Queue, Debug) -> +hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) -> TS = case TimeoutState of undefined -> undefined; {backoff, _, _, _, _} -> {now(), TimeoutState} end, - proc_lib:hibernate(?MODULE, wake_hib, [Parent, Name, State, Mod, - TS, Queue, Debug]). + proc_lib:hibernate(?MODULE, wake_hib, + [GS2State #gs2_state { timeout_state = TS }]). -pre_hibernate(Parent, Name, State, Mod, TimeoutState, Queue, Debug) -> +pre_hibernate(GS2State = #gs2_state { state = State, + mod = Mod }) -> case erlang:function_exported(Mod, handle_pre_hibernate, 1) of true -> case catch Mod:handle_pre_hibernate(State) of {hibernate, NState} -> - hibernate(Parent, Name, NState, Mod, TimeoutState, Queue, - Debug); + hibernate(GS2State #gs2_state { state = NState } ); Reply -> - handle_common_termination(Reply, Name, pre_hibernate, - Mod, State, Debug) + handle_common_termination(Reply, pre_hibernate, GS2State) end; false -> - hibernate(Parent, Name, State, Mod, TimeoutState, Queue, Debug) + hibernate(GS2State) end. 
-post_hibernate(Parent, Name, State, Mod, TimeoutState, Queue, Debug) -> +post_hibernate(GS2State = #gs2_state { state = State, + mod = Mod }) -> case erlang:function_exported(Mod, handle_post_hibernate, 1) of true -> case catch Mod:handle_post_hibernate(State) of {noreply, NState} -> - process_next_msg(Parent, Name, NState, Mod, infinity, - TimeoutState, Queue, Debug); + process_next_msg(GS2State #gs2_state { state = NState, + time = infinity }); {noreply, NState, Time} -> - process_next_msg(Parent, Name, NState, Mod, Time, - TimeoutState, Queue, Debug); + process_next_msg(GS2State #gs2_state { state = NState, + time = Time }); Reply -> - handle_common_termination(Reply, Name, post_hibernate, - Mod, State, Debug) + handle_common_termination(Reply, post_hibernate, GS2State) end; false -> %% use hibernate here, not infinity. This matches @@ -574,8 +563,7 @@ %% still set to hibernate, iff that msg is the very msg %% that woke us up (or the first msg we receive after %% waking up). - process_next_msg(Parent, Name, State, Mod, hibernate, - TimeoutState, Queue, Debug) + process_next_msg(GS2State #gs2_state { time = hibernate }) end. adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO, @@ -596,32 +584,40 @@ CurrentTO1 = Base + Extra, {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}. -in({'$gen_pcast', {Priority, Msg}}, Queue) -> - priority_queue:in({'$gen_cast', Msg}, Priority, Queue); -in({'$gen_pcall', From, {Priority, Msg}}, Queue) -> - priority_queue:in({'$gen_call', From, Msg}, Priority, Queue); -in(Input, Queue) -> - priority_queue:in(Input, Queue). - -process_msg(Parent, Name, State, Mod, Time, TimeoutState, Queue, - Debug, Msg) -> +in({'$gen_cast', Msg}, GS2State = #gs2_state { prioritise_cast = PC, + queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in( + {'$gen_cast', Msg}, + PC(Msg, GS2State), Queue) }; +in({'$gen_call', From, Msg}, GS2State = #gs2_state { prioritise_call = PC, + queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in( + {'$gen_call', From, Msg}, + PC(Msg, From, GS2State), Queue) }; +in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in( + Input, PI(Input, GS2State), Queue) }. + +process_msg(Msg, + GS2State = #gs2_state { parent = Parent, + name = Name, + debug = Debug }) -> case Msg of - {system, From, Req} -> - sys:handle_system_msg - (Req, From, Parent, ?MODULE, Debug, - [Name, State, Mod, Time, TimeoutState, Queue]); + {system, From, Req} -> + sys:handle_system_msg( + Req, From, Parent, ?MODULE, Debug, + GS2State); %% gen_server puts Hib on the end as the 7th arg, but that %% version of the function seems not to be documented so %% leaving out for now. - {'EXIT', Parent, Reason} -> - terminate(Reason, Name, Msg, Mod, State, Debug); - _Msg when Debug =:= [] -> - handle_msg(Msg, Parent, Name, State, Mod, TimeoutState, Queue); - _Msg -> - Debug1 = sys:handle_debug(Debug, {?MODULE, print_event}, - Name, {in, Msg}), - handle_msg(Msg, Parent, Name, State, Mod, TimeoutState, Queue, - Debug1) + {'EXIT', Parent, Reason} -> + terminate(Reason, Msg, GS2State); + _Msg when Debug =:= [] -> + handle_msg(Msg, GS2State); + _Msg -> + Debug1 = sys:handle_debug(Debug, fun print_event/3, + Name, {in, Msg}), + handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }) end. %%% --------------------------------------------------- @@ -638,35 +634,35 @@ Tag = make_ref(), Caller = self(), Receiver = - spawn( - fun() -> - %% Middleman process. 
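pre_hibernate/1 and post_hibernate/1 above only consult the callback module when it exports the optional hooks: handle_pre_hibernate/1 must return {hibernate, NewState}, and handle_post_hibernate/1 returns {noreply, NewState} (optionally with a timeout), any other result being treated as a termination. A minimal pair of hooks, with the garbage-collect step purely illustrative:

    handle_pre_hibernate(State) ->
        %% e.g. shed caches or force a GC before the process hibernates
        garbage_collect(),
        {hibernate, State}.

    handle_post_hibernate(State) ->
        {noreply, State}.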
Should be unsensitive to regular - %% exit signals. The sychronization is needed in case - %% the receiver would exit before the caller started - %% the monitor. - process_flag(trap_exit, true), - Mref = erlang:monitor(process, Caller), - receive - {Caller,Tag} -> - Monitors = send_nodes(Nodes, Name, Tag, Req), - TimerId = erlang:start_timer(Timeout, self(), ok), - Result = rec_nodes(Tag, Monitors, Name, TimerId), - exit({self(),Tag,Result}); - {'DOWN',Mref,_,_,_} -> - %% Caller died before sending us the go-ahead. - %% Give up silently. - exit(normal) - end - end), + spawn( + fun () -> + %% Middleman process. Should be unsensitive to regular + %% exit signals. The sychronization is needed in case + %% the receiver would exit before the caller started + %% the monitor. + process_flag(trap_exit, true), + Mref = erlang:monitor(process, Caller), + receive + {Caller,Tag} -> + Monitors = send_nodes(Nodes, Name, Tag, Req), + TimerId = erlang:start_timer(Timeout, self(), ok), + Result = rec_nodes(Tag, Monitors, Name, TimerId), + exit({self(),Tag,Result}); + {'DOWN',Mref,_,_,_} -> + %% Caller died before sending us the go-ahead. + %% Give up silently. + exit(normal) + end + end), Mref = erlang:monitor(process, Receiver), Receiver ! {self(),Tag}, receive - {'DOWN',Mref,_,_,{Receiver,Tag,Result}} -> - Result; - {'DOWN',Mref,_,_,Reason} -> - %% The middleman code failed. Or someone did - %% exit(_, kill) on the middleman process => Reason==killed - exit(Reason) + {'DOWN',Mref,_,_,{Receiver,Tag,Result}} -> + Result; + {'DOWN',Mref,_,_,Reason} -> + %% The middleman code failed. Or someone did + %% exit(_, kill) on the middleman process => Reason==killed + exit(Reason) end. send_nodes(Nodes, Name, Tag, Req) -> @@ -681,7 +677,7 @@ send_nodes([_Node|Tail], Name, Tag, Req, Monitors) -> %% Skip non-atom Node send_nodes(Tail, Name, Tag, Req, Monitors); -send_nodes([], _Name, _Tag, _Req, Monitors) -> +send_nodes([], _Name, _Tag, _Req, Monitors) -> Monitors. %% Against old nodes: @@ -691,89 +687,89 @@ %% Against contemporary nodes: %% Wait for reply, server 'DOWN', or timeout from TimerId. -rec_nodes(Tag, Nodes, Name, TimerId) -> +rec_nodes(Tag, Nodes, Name, TimerId) -> rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId). rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) -> receive - {'DOWN', R, _, _, _} -> - rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId); - {{Tag, N}, Reply} -> %% Tag is bound !!! - unmonitor(R), - rec_nodes(Tag, Tail, Name, Badnodes, - [{N,Reply}|Replies], Time, TimerId); - {timeout, TimerId, _} -> - unmonitor(R), - %% Collect all replies that already have arrived - rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) + {'DOWN', R, _, _, _} -> + rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId); + {{Tag, N}, Reply} -> %% Tag is bound !!! + unmonitor(R), + rec_nodes(Tag, Tail, Name, Badnodes, + [{N,Reply}|Replies], Time, TimerId); + {timeout, TimerId, _} -> + unmonitor(R), + %% Collect all replies that already have arrived + rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) end; rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) -> %% R6 node receive - {nodedown, N} -> - monitor_node(N, false), - rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId); - {{Tag, N}, Reply} -> %% Tag is bound !!! 
- receive {nodedown, N} -> ok after 0 -> ok end, - monitor_node(N, false), - rec_nodes(Tag, Tail, Name, Badnodes, - [{N,Reply}|Replies], 2000, TimerId); - {timeout, TimerId, _} -> - receive {nodedown, N} -> ok after 0 -> ok end, - monitor_node(N, false), - %% Collect all replies that already have arrived - rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies) + {nodedown, N} -> + monitor_node(N, false), + rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId); + {{Tag, N}, Reply} -> %% Tag is bound !!! + receive {nodedown, N} -> ok after 0 -> ok end, + monitor_node(N, false), + rec_nodes(Tag, Tail, Name, Badnodes, + [{N,Reply}|Replies], 2000, TimerId); + {timeout, TimerId, _} -> + receive {nodedown, N} -> ok after 0 -> ok end, + monitor_node(N, false), + %% Collect all replies that already have arrived + rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies) after Time -> - case rpc:call(N, erlang, whereis, [Name]) of - Pid when is_pid(Pid) -> % It exists try again. - rec_nodes(Tag, [N|Tail], Name, Badnodes, - Replies, infinity, TimerId); - _ -> % badnode - receive {nodedown, N} -> ok after 0 -> ok end, - monitor_node(N, false), - rec_nodes(Tag, Tail, Name, [N|Badnodes], - Replies, 2000, TimerId) - end + case rpc:call(N, erlang, whereis, [Name]) of + Pid when is_pid(Pid) -> % It exists try again. + rec_nodes(Tag, [N|Tail], Name, Badnodes, + Replies, infinity, TimerId); + _ -> % badnode + receive {nodedown, N} -> ok after 0 -> ok end, + monitor_node(N, false), + rec_nodes(Tag, Tail, Name, [N|Badnodes], + Replies, 2000, TimerId) + end end; rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) -> case catch erlang:cancel_timer(TimerId) of - false -> % It has already sent it's message - receive - {timeout, TimerId, _} -> ok - after 0 -> - ok - end; - _ -> % Timer was cancelled, or TimerId was 'undefined' - ok + false -> % It has already sent it's message + receive + {timeout, TimerId, _} -> ok + after 0 -> + ok + end; + _ -> % Timer was cancelled, or TimerId was 'undefined' + ok end, {Replies, Badnodes}. %% Collect all replies that already have arrived rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) -> receive - {'DOWN', R, _, _, _} -> - rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies); - {{Tag, N}, Reply} -> %% Tag is bound !!! - unmonitor(R), - rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies]) + {'DOWN', R, _, _, _} -> + rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies); + {{Tag, N}, Reply} -> %% Tag is bound !!! + unmonitor(R), + rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies]) after 0 -> - unmonitor(R), - rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) + unmonitor(R), + rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) end; rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) -> %% R6 node receive - {nodedown, N} -> - monitor_node(N, false), - rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies); - {{Tag, N}, Reply} -> %% Tag is bound !!! - receive {nodedown, N} -> ok after 0 -> ok end, - monitor_node(N, false), - rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies]) + {nodedown, N} -> + monitor_node(N, false), + rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies); + {{Tag, N}, Reply} -> %% Tag is bound !!! 
+ receive {nodedown, N} -> ok after 0 -> ok end, + monitor_node(N, false), + rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies]) after 0 -> - receive {nodedown, N} -> ok after 0 -> ok end, - monitor_node(N, false), - rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) + receive {nodedown, N} -> ok after 0 -> ok end, + monitor_node(N, false), + rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies) end; rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) -> {Replies, Badnodes}. @@ -785,28 +781,28 @@ start_monitor(Node, Name) when is_atom(Node), is_atom(Name) -> if node() =:= nonode@nohost, Node =/= nonode@nohost -> - Ref = make_ref(), - self() ! {'DOWN', Ref, process, {Name, Node}, noconnection}, - {Node, Ref}; + Ref = make_ref(), + self() ! {'DOWN', Ref, process, {Name, Node}, noconnection}, + {Node, Ref}; true -> - case catch erlang:monitor(process, {Name, Node}) of - {'EXIT', _} -> - %% Remote node is R6 - monitor_node(Node, true), - Node; - Ref when is_reference(Ref) -> - {Node, Ref} - end + case catch erlang:monitor(process, {Name, Node}) of + {'EXIT', _} -> + %% Remote node is R6 + monitor_node(Node, true), + Node; + Ref when is_reference(Ref) -> + {Node, Ref} + end end. %% Cancels a monitor started with Ref=erlang:monitor(_, _). unmonitor(Ref) when is_reference(Ref) -> erlang:demonitor(Ref), receive - {'DOWN', Ref, _, _, _} -> - true + {'DOWN', Ref, _, _, _} -> + true after 0 -> - true + true end. %%% --------------------------------------------------- @@ -818,130 +814,110 @@ dispatch(Info, Mod, State) -> Mod:handle_info(Info, State). -handle_msg({'$gen_call', From, Msg}, - Parent, Name, State, Mod, TimeoutState, Queue) -> - case catch Mod:handle_call(Msg, From, State) of - {reply, Reply, NState} -> - reply(From, Reply), - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, []); - {reply, Reply, NState, Time1} -> - reply(From, Reply), - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, []); - {noreply, NState} -> - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, []); - {noreply, NState, Time1} -> - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, []); - {stop, Reason, Reply, NState} -> - {'EXIT', R} = - (catch terminate(Reason, Name, Msg, Mod, NState, [])), - reply(From, Reply), - exit(R); - Other -> handle_common_reply(Other, Parent, Name, Msg, Mod, State, - TimeoutState, Queue) - end; -handle_msg(Msg, - Parent, Name, State, Mod, TimeoutState, Queue) -> - Reply = (catch dispatch(Msg, Mod, State)), - handle_common_reply(Reply, Parent, Name, Msg, Mod, State, - TimeoutState, Queue). - -handle_msg({'$gen_call', From, Msg}, - Parent, Name, State, Mod, TimeoutState, Queue, Debug) -> +common_reply(_Name, From, Reply, _NState, [] = _Debug) -> + reply(From, Reply), + []; +common_reply(Name, From, Reply, NState, Debug) -> + reply(Name, From, Reply, NState, Debug). + +common_debug([] = _Debug, _Func, _Info, _Event) -> + []; +common_debug(Debug, Func, Info, Event) -> + sys:handle_debug(Debug, Func, Info, Event). 
+ +handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod, + state = State, + name = Name, + debug = Debug }) -> case catch Mod:handle_call(Msg, From, State) of - {reply, Reply, NState} -> - Debug1 = reply(Name, From, Reply, NState, Debug), - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, - Debug1); - {reply, Reply, NState, Time1} -> - Debug1 = reply(Name, From, Reply, NState, Debug), - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, Debug1); - {noreply, NState} -> - Debug1 = sys:handle_debug(Debug, {?MODULE, print_event}, Name, - {noreply, NState}), - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, - Debug1); - {noreply, NState, Time1} -> - Debug1 = sys:handle_debug(Debug, {?MODULE, print_event}, Name, - {noreply, NState}), - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, Debug1); - {stop, Reason, Reply, NState} -> - {'EXIT', R} = - (catch terminate(Reason, Name, Msg, Mod, NState, Debug)), - reply(Name, From, Reply, NState, Debug), - exit(R); - Other -> - handle_common_reply(Other, Parent, Name, Msg, Mod, State, - TimeoutState, Queue, Debug) + {reply, Reply, NState} -> + Debug1 = common_reply(Name, From, Reply, NState, Debug), + loop(GS2State #gs2_state { state = NState, + time = infinity, + debug = Debug1 }); + {reply, Reply, NState, Time1} -> + Debug1 = common_reply(Name, From, Reply, NState, Debug), + loop(GS2State #gs2_state { state = NState, + time = Time1, + debug = Debug1}); + {noreply, NState} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {noreply, NState}), + loop(GS2State #gs2_state {state = NState, + time = infinity, + debug = Debug1}); + {noreply, NState, Time1} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {noreply, NState}), + loop(GS2State #gs2_state {state = NState, + time = Time1, + debug = Debug1}); + {stop, Reason, Reply, NState} -> + {'EXIT', R} = + (catch terminate(Reason, Msg, + GS2State #gs2_state { state = NState })), + reply(Name, From, Reply, NState, Debug), + exit(R); + Other -> + handle_common_reply(Other, Msg, GS2State) end; -handle_msg(Msg, - Parent, Name, State, Mod, TimeoutState, Queue, Debug) -> +handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) -> Reply = (catch dispatch(Msg, Mod, State)), - handle_common_reply(Reply, Parent, Name, Msg, Mod, State, - TimeoutState, Queue, Debug). + handle_common_reply(Reply, Msg, GS2State). -handle_common_reply(Reply, Parent, Name, Msg, Mod, State, - TimeoutState, Queue) -> +handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name, + debug = Debug}) -> case Reply of - {noreply, NState} -> - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, []); - {noreply, NState, Time1} -> - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, []); + {noreply, NState} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {noreply, NState}), + loop(GS2State #gs2_state { state = NState, + time = infinity, + debug = Debug1 }); + {noreply, NState, Time1} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {noreply, NState}), + loop(GS2State #gs2_state { state = NState, + time = Time1, + debug = Debug1 }); _ -> - handle_common_termination(Reply, Name, Msg, Mod, State, []) + handle_common_termination(Reply, Msg, GS2State) end. 
-handle_common_reply(Reply, Parent, Name, Msg, Mod, State, TimeoutState, Queue, - Debug) -> +handle_common_termination(Reply, Msg, GS2State) -> case Reply of - {noreply, NState} -> - Debug1 = sys:handle_debug(Debug, {?MODULE, print_event}, Name, - {noreply, NState}), - loop(Parent, Name, NState, Mod, infinity, TimeoutState, Queue, - Debug1); - {noreply, NState, Time1} -> - Debug1 = sys:handle_debug(Debug, {?MODULE, print_event}, Name, - {noreply, NState}), - loop(Parent, Name, NState, Mod, Time1, TimeoutState, Queue, Debug1); + {stop, Reason, NState} -> + terminate(Reason, Msg, GS2State #gs2_state { state = NState }); + {'EXIT', What} -> + terminate(What, Msg, GS2State); _ -> - handle_common_termination(Reply, Name, Msg, Mod, State, Debug) - end. - -handle_common_termination(Reply, Name, Msg, Mod, State, Debug) -> - case Reply of - {stop, Reason, NState} -> - terminate(Reason, Name, Msg, Mod, NState, Debug); - {'EXIT', What} -> - terminate(What, Name, Msg, Mod, State, Debug); - _ -> - terminate({bad_return_value, Reply}, Name, Msg, Mod, State, Debug) + terminate({bad_return_value, Reply}, Msg, GS2State) end. reply(Name, {To, Tag}, Reply, State, Debug) -> reply({To, Tag}, Reply), - sys:handle_debug(Debug, {?MODULE, print_event}, Name, - {out, Reply, To, State} ). + sys:handle_debug( + Debug, fun print_event/3, Name, {out, Reply, To, State}). %%----------------------------------------------------------------- %% Callback functions for system messages handling. %%----------------------------------------------------------------- -system_continue(Parent, Debug, [Name, State, Mod, Time, TimeoutState, Queue]) -> - loop(Parent, Name, State, Mod, Time, TimeoutState, Queue, Debug). +system_continue(Parent, Debug, GS2State) -> + loop(GS2State #gs2_state { parent = Parent, debug = Debug }). --ifdef(use_specs). --spec system_terminate(_, _, _, [_]) -> no_return(). --endif. - -system_terminate(Reason, _Parent, Debug, [Name, State, Mod, _Time, - _TimeoutState, _Queue]) -> - terminate(Reason, Name, [], Mod, State, Debug). +system_terminate(Reason, _Parent, Debug, GS2State) -> + terminate(Reason, [], GS2State #gs2_state { debug = Debug }). -system_code_change([Name, State, Mod, Time, TimeoutState, Queue], _Module, - OldVsn, Extra) -> +system_code_change(GS2State = #gs2_state { mod = Mod, + state = State }, + _Module, OldVsn, Extra) -> case catch Mod:code_change(OldVsn, State, Extra) of - {ok, NewState} -> - {ok, [Name, NewState, Mod, Time, TimeoutState, Queue]}; - Else -> + {ok, NewState} -> + NewGS2State = find_prioritisers( + GS2State #gs2_state { state = NewState }), + {ok, [NewGS2State]}; + Else -> Else end. 
@@ -951,18 +927,18 @@ %%----------------------------------------------------------------- print_event(Dev, {in, Msg}, Name) -> case Msg of - {'$gen_call', {From, _Tag}, Call} -> - io:format(Dev, "*DBG* ~p got call ~p from ~w~n", - [Name, Call, From]); - {'$gen_cast', Cast} -> - io:format(Dev, "*DBG* ~p got cast ~p~n", - [Name, Cast]); - _ -> - io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg]) + {'$gen_call', {From, _Tag}, Call} -> + io:format(Dev, "*DBG* ~p got call ~p from ~w~n", + [Name, Call, From]); + {'$gen_cast', Cast} -> + io:format(Dev, "*DBG* ~p got cast ~p~n", + [Name, Cast]); + _ -> + io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg]) end; print_event(Dev, {out, Msg, To, State}, Name) -> - io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n", - [Name, Msg, To, State]); + io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n", + [Name, Msg, To, State]); print_event(Dev, {noreply, State}, Name) -> io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]); print_event(Dev, Event, Name) -> @@ -973,56 +949,61 @@ %%% Terminate the server. %%% --------------------------------------------------- -terminate(Reason, Name, Msg, Mod, State, Debug) -> +terminate(Reason, Msg, #gs2_state { name = Name, + mod = Mod, + state = State, + debug = Debug }) -> case catch Mod:terminate(Reason, State) of - {'EXIT', R} -> - error_info(R, Name, Msg, State, Debug), - exit(R); - _ -> - case Reason of - normal -> - exit(normal); - shutdown -> - exit(shutdown); - {shutdown,_}=Shutdown -> - exit(Shutdown); - _ -> - error_info(Reason, Name, Msg, State, Debug), - exit(Reason) - end + {'EXIT', R} -> + error_info(R, Reason, Name, Msg, State, Debug), + exit(R); + _ -> + case Reason of + normal -> + exit(normal); + shutdown -> + exit(shutdown); + {shutdown,_}=Shutdown -> + exit(Shutdown); + _ -> + error_info(Reason, undefined, Name, Msg, State, Debug), + exit(Reason) + end end. -error_info(_Reason, application_controller, _Msg, _State, _Debug) -> +error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) -> %% OTP-5811 Don't send an error report if it's the system process %% application_controller which is terminating - let init take care %% of it instead ok; -error_info(Reason, Name, Msg, State, Debug) -> - Reason1 = - case Reason of - {undef,[{M,F,A}|MFAs]} -> - case code:is_loaded(M) of - false -> - {'module could not be loaded',[{M,F,A}|MFAs]}; - _ -> - case erlang:function_exported(M, F, length(A)) of - true -> - Reason; - false -> - {'function not exported',[{M,F,A}|MFAs]} - end - end; - _ -> - Reason - end, - format("** Generic server ~p terminating \n" - "** Last message in was ~p~n" - "** When Server state == ~p~n" - "** Reason for termination == ~n** ~p~n", - [Name, Msg, State, Reason1]), +error_info(Reason, RootCause, Name, Msg, State, Debug) -> + Reason1 = error_reason(Reason), + Fmt = + "** Generic server ~p terminating~n" + "** Last message in was ~p~n" + "** When Server state == ~p~n" + "** Reason for termination == ~n** ~p~n", + case RootCause of + undefined -> format(Fmt, [Name, Msg, State, Reason1]); + _ -> format(Fmt ++ "** In 'terminate' callback " + "with reason ==~n** ~p~n", + [Name, Msg, State, Reason1, + error_reason(RootCause)]) + end, sys:print_log(Debug), ok. +error_reason({undef,[{M,F,A}|MFAs]} = Reason) -> + case code:is_loaded(M) of + false -> {'module could not be loaded',[{M,F,A}|MFAs]}; + _ -> case erlang:function_exported(M, F, length(A)) of + true -> Reason; + false -> {'function not exported',[{M,F,A}|MFAs]} + end + end; +error_reason(Reason) -> + Reason. 
+ %%% --------------------------------------------------- %%% Misc. functions. %%% --------------------------------------------------- @@ -1036,74 +1017,109 @@ debug_options(Name, Opts) -> case opt(debug, Opts) of - {ok, Options} -> dbg_options(Name, Options); - _ -> dbg_options(Name, []) + {ok, Options} -> dbg_options(Name, Options); + _ -> dbg_options(Name, []) end. dbg_options(Name, []) -> - Opts = - case init:get_argument(generic_debug) of - error -> - []; - _ -> - [log, statistics] - end, + Opts = + case init:get_argument(generic_debug) of + error -> + []; + _ -> + [log, statistics] + end, dbg_opts(Name, Opts); dbg_options(Name, Opts) -> dbg_opts(Name, Opts). dbg_opts(Name, Opts) -> case catch sys:debug_options(Opts) of - {'EXIT',_} -> - format("~p: ignoring erroneous debug options - ~p~n", - [Name, Opts]), - []; - Dbg -> - Dbg + {'EXIT',_} -> + format("~p: ignoring erroneous debug options - ~p~n", + [Name, Opts]), + []; + Dbg -> + Dbg end. get_proc_name(Pid) when is_pid(Pid) -> Pid; get_proc_name({local, Name}) -> case process_info(self(), registered_name) of - {registered_name, Name} -> - Name; - {registered_name, _Name} -> - exit(process_not_registered); - [] -> - exit(process_not_registered) - end; + {registered_name, Name} -> + Name; + {registered_name, _Name} -> + exit(process_not_registered); + [] -> + exit(process_not_registered) + end; get_proc_name({global, Name}) -> case global:safe_whereis_name(Name) of - undefined -> - exit(process_not_registered_globally); - Pid when Pid =:= self() -> - Name; - _Pid -> - exit(process_not_registered_globally) + undefined -> + exit(process_not_registered_globally); + Pid when Pid =:= self() -> + Name; + _Pid -> + exit(process_not_registered_globally) end. get_parent() -> case get('$ancestors') of - [Parent | _] when is_pid(Parent)-> + [Parent | _] when is_pid(Parent)-> Parent; [Parent | _] when is_atom(Parent)-> name_to_pid(Parent); - _ -> - exit(process_was_not_started_by_proc_lib) + _ -> + exit(process_was_not_started_by_proc_lib) end. name_to_pid(Name) -> case whereis(Name) of - undefined -> - case global:safe_whereis_name(Name) of - undefined -> - exit(could_not_find_registerd_name); - Pid -> - Pid - end; - Pid -> - Pid + undefined -> + case global:safe_whereis_name(Name) of + undefined -> + exit(could_not_find_registerd_name); + Pid -> + Pid + end; + Pid -> + Pid + end. + +find_prioritisers(GS2State = #gs2_state { mod = Mod }) -> + PrioriCall = function_exported_or_default( + Mod, 'prioritise_call', 3, + fun (_Msg, _From, _State) -> 0 end), + PrioriCast = function_exported_or_default(Mod, 'prioritise_cast', 2, + fun (_Msg, _State) -> 0 end), + PrioriInfo = function_exported_or_default(Mod, 'prioritise_info', 2, + fun (_Msg, _State) -> 0 end), + GS2State #gs2_state { prioritise_call = PrioriCall, + prioritise_cast = PrioriCast, + prioritise_info = PrioriInfo }. + +function_exported_or_default(Mod, Fun, Arity, Default) -> + case erlang:function_exported(Mod, Fun, Arity) of + true -> case Arity of + 2 -> fun (Msg, GS2State = #gs2_state { state = State }) -> + case catch Mod:Fun(Msg, State) of + Res when is_integer(Res) -> + Res; + Err -> + handle_common_termination(Err, Msg, GS2State) + end + end; + 3 -> fun (Msg, From, GS2State = #gs2_state { state = State }) -> + case catch Mod:Fun(Msg, From, State) of + Res when is_integer(Res) -> + Res; + Err -> + handle_common_termination(Err, Msg, GS2State) + end + end + end; + false -> Default end. 
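The find_prioritisers/1 and function_exported_or_default/4 additions above let a gen_server2 callback module optionally export prioritise_call/3, prioritise_cast/2 and prioritise_info/2; each must return an integer, numerically higher values are taken first from the internal priority queue, and a module that does not export them falls back to priority 0. The sketch below is not part of the diff: a minimal, hypothetical callback module showing the shape of those callbacks, with invented message patterns and priority values.

-module(gs2_priority_example).
-behaviour(gen_server2).

-export([start_link/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3,
         prioritise_call/3, prioritise_cast/2, prioritise_info/2]).

start_link() -> gen_server2:start_link(?MODULE, [], []).

init([]) -> {ok, no_state}.

handle_call(info, _From, State) -> {reply, {state, State}, State};
handle_call(_Msg, _From, State) -> {reply, ok, State}.

handle_cast(_Msg, State) -> {noreply, State}.

handle_info(_Msg, State) -> {noreply, State}.

terminate(_Reason, _State) -> ok.

code_change(_OldVsn, State, _Extra) -> {ok, State}.

%% Optional prioritisers: integer results only; any other result is routed
%% to handle_common_termination/3 by function_exported_or_default/4. The
%% patterns and numbers here are illustrative assumptions, not values taken
%% from RabbitMQ itself.
prioritise_call(info, _From, _State) -> 9;  %% answer info calls ahead of the backlog
prioritise_call(_Msg, _From, _State) -> 0.

prioritise_cast({urgent, _}, _State) -> 8;
prioritise_cast(_Msg, _State)        -> 0.

prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 8;
prioritise_info(_Msg, _State)                                    -> 0.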
%%----------------------------------------------------------------- @@ -1111,27 +1127,26 @@ %%----------------------------------------------------------------- format_status(Opt, StatusData) -> [PDict, SysState, Parent, Debug, - [Name, State, Mod, _Time, _TimeoutState, Queue]] = StatusData, + #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] = + StatusData, NameTag = if is_pid(Name) -> - pid_to_list(Name); - is_atom(Name) -> - Name - end, + pid_to_list(Name); + is_atom(Name) -> + Name + end, Header = lists:concat(["Status for generic server ", NameTag]), Log = sys:get_debug(log, Debug, []), - Specfic = - case erlang:function_exported(Mod, format_status, 2) of - true -> - case catch Mod:format_status(Opt, [PDict, State]) of - {'EXIT', _} -> [{data, [{"State", State}]}]; - Else -> Else - end; - _ -> - [{data, [{"State", State}]}] - end, + Specfic = + case erlang:function_exported(Mod, format_status, 2) of + true -> case catch Mod:format_status(Opt, [PDict, State]) of + {'EXIT', _} -> [{data, [{"State", State}]}]; + Else -> Else + end; + _ -> [{data, [{"State", State}]}] + end, [{header, Header}, {data, [{"Status", SysState}, - {"Parent", Parent}, - {"Logged events", Log}, + {"Parent", Parent}, + {"Logged events", Log}, {"Queued messages", priority_queue:to_list(Queue)}]} | Specfic]. diff -Nru rabbitmq-server-1.7.2/src/pg_local.erl rabbitmq-server-2.3.1/src/pg_local.erl --- rabbitmq-server-1.7.2/src/pg_local.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/pg_local.erl 2011-02-03 12:47:35.000000000 +0000 @@ -13,7 +13,7 @@ %% versions of Erlang/OTP. The remaining type specs have been %% removed. -%% All modifications are (C) 2010 LShift Ltd. +%% All modifications are (C) 2010-2011 VMware, Inc. %% %CopyrightBegin% %% @@ -36,8 +36,8 @@ -export([join/2, leave/2, get_members/1]). -export([sync/0]). %% intended for testing only; not part of official API --export([start/0,start_link/0,init/1,handle_call/3,handle_cast/2,handle_info/2, - terminate/2]). +-export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2, + handle_info/2, terminate/2]). %%---------------------------------------------------------------------------- @@ -45,8 +45,8 @@ -type(name() :: term()). --spec(start_link/0 :: () -> {'ok', pid()} | {'error', term()}). --spec(start/0 :: () -> {'ok', pid()} | {'error', term()}). +-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). +-spec(start/0 :: () -> {'ok', pid()} | {'error', any()}). -spec(join/2 :: (name(), pid()) -> 'ok'). -spec(leave/2 :: (name(), pid()) -> 'ok'). -spec(get_members/1 :: (name()) -> [pid()]). @@ -206,7 +206,7 @@ case whereis(?MODULE) of undefined -> C = {pg_local, {?MODULE, start_link, []}, permanent, - 1000, worker, [?MODULE]}, + 16#ffffffff, worker, [?MODULE]}, supervisor:start_child(kernel_safe_sup, C); PgLocalPid -> {ok, PgLocalPid} diff -Nru rabbitmq-server-1.7.2/src/priority_queue.erl rabbitmq-server-2.3.1/src/priority_queue.erl --- rabbitmq-server-1.7.2/src/priority_queue.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/priority_queue.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% All Rights Reserved. +%% The Original Code is RabbitMQ. %% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% %% Priority queues have essentially the same interface as ordinary diff -Nru rabbitmq-server-1.7.2/src/rabbit_access_control.erl rabbitmq-server-2.3.1/src/rabbit_access_control.erl --- rabbitmq-server-1.7.2/src/rabbit_access_control.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_access_control.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,357 +1,137 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. 
Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_access_control). --include_lib("stdlib/include/qlc.hrl"). + -include("rabbit.hrl"). --export([check_login/2, user_pass_login/2, - check_vhost_access/2, check_resource_access/3]). --export([add_user/2, delete_user/1, change_password/2, list_users/0, - lookup_user/1]). --export([add_vhost/1, delete_vhost/1, list_vhosts/0]). --export([set_permissions/5, clear_permissions/2, - list_vhost_permissions/1, list_user_permissions/1]). +-export([user_pass_login/2, check_user_pass_login/2, check_user_login/2, + check_vhost_access/2, check_resource_access/3, list_vhosts/2]). %%---------------------------------------------------------------------------- -ifdef(use_specs). +-export_type([permission_atom/0, vhost_permission_atom/0]). + -type(permission_atom() :: 'configure' | 'read' | 'write'). +-type(vhost_permission_atom() :: 'read' | 'write'). --spec(check_login/2 :: (binary(), binary()) -> user()). --spec(user_pass_login/2 :: (username(), password()) -> user()). --spec(check_vhost_access/2 :: (user(), vhost()) -> 'ok'). +-spec(user_pass_login/2 :: + (rabbit_types:username(), rabbit_types:password()) + -> rabbit_types:user() | rabbit_types:channel_exit()). +-spec(check_user_pass_login/2 :: + (rabbit_types:username(), rabbit_types:password()) + -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}). +-spec(check_vhost_access/2 :: + (rabbit_types:user(), rabbit_types:vhost()) + -> 'ok' | rabbit_types:channel_exit()). -spec(check_resource_access/3 :: - (username(), r(atom()), permission_atom()) -> 'ok'). --spec(add_user/2 :: (username(), password()) -> 'ok'). --spec(delete_user/1 :: (username()) -> 'ok'). --spec(change_password/2 :: (username(), password()) -> 'ok'). --spec(list_users/0 :: () -> [username()]). --spec(lookup_user/1 :: (username()) -> {'ok', user()} | not_found()). --spec(add_vhost/1 :: (vhost()) -> 'ok'). --spec(delete_vhost/1 :: (vhost()) -> 'ok'). --spec(list_vhosts/0 :: () -> [vhost()]). --spec(set_permissions/5 :: - (username(), vhost(), regexp(), regexp(), regexp()) -> 'ok'). --spec(clear_permissions/2 :: (username(), vhost()) -> 'ok'). --spec(list_vhost_permissions/1 :: - (vhost()) -> [{username(), regexp(), regexp(), regexp()}]). --spec(list_user_permissions/1 :: - (username()) -> [{vhost(), regexp(), regexp(), regexp()}]). + (rabbit_types:user(), rabbit_types:r(atom()), permission_atom()) + -> 'ok' | rabbit_types:channel_exit()). +-spec(list_vhosts/2 :: (rabbit_types:user(), vhost_permission_atom()) + -> [rabbit_types:vhost()]). -endif. %%---------------------------------------------------------------------------- -%% SASL PLAIN, as used by the Qpid Java client and our clients. Also, -%% apparently, by OpenAMQ. -check_login(<<"PLAIN">>, Response) -> - [User, Pass] = [list_to_binary(T) || - T <- string:tokens(binary_to_list(Response), [0])], - user_pass_login(User, Pass); -%% AMQPLAIN, as used by Qpid Python test suite. The 0-8 spec actually -%% defines this as PLAIN, but in 0-9 that definition is gone, instead -%% referring generically to "SASL security mechanism", i.e. the above. 
-check_login(<<"AMQPLAIN">>, Response) -> - LoginTable = rabbit_binary_parser:parse_table(Response), - case {lists:keysearch(<<"LOGIN">>, 1, LoginTable), - lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of - {{value, {_, longstr, User}}, - {value, {_, longstr, Pass}}} -> - user_pass_login(User, Pass); - _ -> - %% Is this an information leak? - rabbit_misc:protocol_error( - access_refused, - "AMQPPLAIN auth info ~w is missing LOGIN or PASSWORD field", - [LoginTable]) - end; - -check_login(Mechanism, _Response) -> - rabbit_misc:protocol_error( - access_refused, "unsupported authentication mechanism '~s'", - [Mechanism]). - user_pass_login(User, Pass) -> ?LOGDEBUG("Login with user ~p pass ~p~n", [User, Pass]), - case lookup_user(User) of - {ok, U} -> - if - Pass == U#user.password -> U; - true -> - rabbit_misc:protocol_error( - access_refused, "login refused for user '~s'", [User]) - end; - {error, not_found} -> + case check_user_pass_login(User, Pass) of + {refused, Msg, Args} -> rabbit_misc:protocol_error( - access_refused, "login refused for user '~s'", [User]) + access_refused, "login refused: ~s", [io_lib:format(Msg, Args)]); + {ok, U} -> + U end. -internal_lookup_vhost_access(Username, VHostPath) -> - %% TODO: use dirty ops instead - rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:read({rabbit_user_permission, - #user_vhost{username = Username, - virtual_host = VHostPath}}) of - [] -> not_found; - [R] -> {ok, R} - end - end). +check_user_pass_login(Username, Password) -> + check_user_login(Username, [{password, Password}]). -check_vhost_access(#user{username = Username}, VHostPath) -> - ?LOGDEBUG("Checking VHost access for ~p to ~p~n", [Username, VHostPath]), - case internal_lookup_vhost_access(Username, VHostPath) of - {ok, _R} -> - ok; - not_found -> - rabbit_misc:protocol_error( - access_refused, "access to vhost '~s' refused for user '~s'", - [VHostPath, Username]) - end. +check_user_login(Username, AuthProps) -> + {ok, Modules} = application:get_env(rabbit, auth_backends), + lists:foldl( + fun(Module, {refused, _, _}) -> + case Module:check_user_login(Username, AuthProps) of + {error, E} -> + {refused, "~s failed authenticating ~s: ~p~n", + [Module, Username, E]}; + Else -> + Else + end; + (_, {ok, User}) -> + {ok, User} + end, {refused, "No modules checked '~s'", [Username]}, Modules). -permission_index(configure) -> #permission.configure; -permission_index(write) -> #permission.write; -permission_index(read) -> #permission.read. +check_vhost_access(User = #user{ username = Username, + auth_backend = Module }, VHostPath) -> + ?LOGDEBUG("Checking VHost access for ~p to ~p~n", [Username, VHostPath]), + check_access( + fun() -> + rabbit_vhost:exists(VHostPath) andalso + Module:check_vhost_access(User, VHostPath, write) + end, + "~s failed checking vhost access to ~s for ~s: ~p~n", + [Module, VHostPath, Username], + "access to vhost '~s' refused for user '~s'", + [VHostPath, Username]). 
-check_resource_access(Username, - R = #resource{kind = exchange, name = <<"">>}, +check_resource_access(User, R = #resource{kind = exchange, name = <<"">>}, Permission) -> - check_resource_access(Username, - R#resource{name = <<"amq.default">>}, + check_resource_access(User, R#resource{name = <<"amq.default">>}, Permission); -check_resource_access(_Username, - #resource{name = <<"amq.gen",_/binary>>}, - _Permission) -> - ok; -check_resource_access(Username, - R = #resource{virtual_host = VHostPath, name = Name}, - Permission) -> - Res = case mnesia:dirty_read({rabbit_user_permission, - #user_vhost{username = Username, - virtual_host = VHostPath}}) of - [] -> - false; - [#user_permission{permission = P}] -> - case regexp:match( - binary_to_list(Name), - binary_to_list(element(permission_index(Permission), P))) of - {match, _, _} -> true; - nomatch -> false - end - end, - if Res -> ok; - true -> rabbit_misc:protocol_error( - access_refused, "access to ~s refused for user '~s'", - [rabbit_misc:rs(R), Username]) - end. - -add_user(Username, Password) -> - R = rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:wread({rabbit_user, Username}) of - [] -> - ok = mnesia:write(rabbit_user, - #user{username = Username, - password = Password}, - write); - _ -> - mnesia:abort({user_already_exists, Username}) - end - end), - rabbit_log:info("Created user ~p~n", [Username]), - R. - -delete_user(Username) -> - R = rabbit_misc:execute_mnesia_transaction( - rabbit_misc:with_user( - Username, - fun () -> - ok = mnesia:delete({rabbit_user, Username}), - [ok = mnesia:delete_object( - rabbit_user_permission, R, write) || - R <- mnesia:match_object( - rabbit_user_permission, - #user_permission{user_vhost = #user_vhost{ - username = Username, - virtual_host = '_'}, - permission = '_'}, - write)], - ok - end)), - rabbit_log:info("Deleted user ~p~n", [Username]), - R. - -change_password(Username, Password) -> - R = rabbit_misc:execute_mnesia_transaction( - rabbit_misc:with_user( - Username, - fun () -> - ok = mnesia:write(rabbit_user, - #user{username = Username, - password = Password}, - write) - end)), - rabbit_log:info("Changed password for user ~p~n", [Username]), - R. - -list_users() -> - mnesia:dirty_all_keys(rabbit_user). - -lookup_user(Username) -> - rabbit_misc:dirty_read({rabbit_user, Username}). - -add_vhost(VHostPath) -> - R = rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:wread({rabbit_vhost, VHostPath}) of - [] -> - ok = mnesia:write(rabbit_vhost, - #vhost{virtual_host = VHostPath}, - write), - [rabbit_exchange:declare( - rabbit_misc:r(VHostPath, exchange, Name), - Type, true, false, []) || - {Name,Type} <- - [{<<"">>, direct}, - {<<"amq.direct">>, direct}, - {<<"amq.topic">>, topic}, - {<<"amq.match">>, headers}, %% per 0-9-1 pdf - {<<"amq.headers">>, headers}, %% per 0-9-1 xml - {<<"amq.fanout">>, fanout}]], - ok; - [_] -> - mnesia:abort({vhost_already_exists, VHostPath}) - end - end), - rabbit_log:info("Added vhost ~p~n", [VHostPath]), - R. - -delete_vhost(VHostPath) -> - %%FIXME: We are forced to delete the queues outside the TX below - %%because queue deletion involves sending messages to the queue - %%process, which in turn results in further mnesia actions and - %%eventually the termination of that process. 
- lists:foreach(fun (Q) -> - {ok,_} = rabbit_amqqueue:delete(Q, false, false) - end, - rabbit_amqqueue:list(VHostPath)), - R = rabbit_misc:execute_mnesia_transaction( - rabbit_misc:with_vhost( - VHostPath, - fun () -> - ok = internal_delete_vhost(VHostPath) - end)), - rabbit_log:info("Deleted vhost ~p~n", [VHostPath]), - R. - -internal_delete_vhost(VHostPath) -> - lists:foreach(fun (#exchange{name=Name}) -> - ok = rabbit_exchange:delete(Name, false) - end, - rabbit_exchange:list(VHostPath)), - lists:foreach(fun ({Username, _, _, _}) -> - ok = clear_permissions(Username, VHostPath) - end, - list_vhost_permissions(VHostPath)), - ok = mnesia:delete({rabbit_vhost, VHostPath}), - ok. - -list_vhosts() -> - mnesia:dirty_all_keys(rabbit_vhost). - -validate_regexp(RegexpBin) -> - Regexp = binary_to_list(RegexpBin), - case regexp:parse(Regexp) of - {ok, _} -> ok; - {error, Reason} -> throw({error, {invalid_regexp, Regexp, Reason}}) +check_resource_access(User = #user{username = Username, auth_backend = Module}, + Resource, Permission) -> + check_access( + fun() -> Module:check_resource_access(User, Resource, Permission) end, + "~s failed checking resource access to ~p for ~s: ~p~n", + [Module, Resource, Username], + "access to ~s refused for user '~s'", + [rabbit_misc:rs(Resource), Username]). + +check_access(Fun, ErrStr, ErrArgs, RefStr, RefArgs) -> + Allow = case Fun() of + {error, _} = E -> + rabbit_log:error(ErrStr, ErrArgs ++ [E]), + false; + Else -> + Else + end, + case Allow of + true -> + ok; + false -> + rabbit_misc:protocol_error(access_refused, RefStr, RefArgs) end. -set_permissions(Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm) -> - lists:map(fun validate_regexp/1, [ConfigurePerm, WritePerm, ReadPerm]), - rabbit_misc:execute_mnesia_transaction( - rabbit_misc:with_user_and_vhost( - Username, VHostPath, - fun () -> ok = mnesia:write( - rabbit_user_permission, - #user_permission{user_vhost = #user_vhost{ - username = Username, - virtual_host = VHostPath}, - permission = #permission{ - configure = ConfigurePerm, - write = WritePerm, - read = ReadPerm}}, - write) - end)). - -clear_permissions(Username, VHostPath) -> - rabbit_misc:execute_mnesia_transaction( - rabbit_misc:with_user_and_vhost( - Username, VHostPath, - fun () -> - ok = mnesia:delete({rabbit_user_permission, - #user_vhost{username = Username, - virtual_host = VHostPath}}) - end)). - -list_vhost_permissions(VHostPath) -> - [{Username, ConfigurePerm, WritePerm, ReadPerm} || - {Username, _, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(rabbit_misc:with_vhost( - VHostPath, match_user_vhost('_', VHostPath)))]. - -list_user_permissions(Username) -> - [{VHostPath, ConfigurePerm, WritePerm, ReadPerm} || - {_, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(rabbit_misc:with_user( - Username, match_user_vhost(Username, '_')))]. - -list_permissions(QueryThunk) -> - [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || - #user_permission{user_vhost = #user_vhost{username = Username, - virtual_host = VHostPath}, - permission = #permission{ - configure = ConfigurePerm, - write = WritePerm, - read = ReadPerm}} <- - %% TODO: use dirty ops instead - rabbit_misc:execute_mnesia_transaction(QueryThunk)]. - -match_user_vhost(Username, VHostPath) -> - fun () -> mnesia:match_object( - rabbit_user_permission, - #user_permission{user_vhost = #user_vhost{ - username = Username, - virtual_host = VHostPath}, - permission = '_'}, - read) - end. 
+%% Permission = write -> log in +%% Permission = read -> learn of the existence of (only relevant for +%% management plugin) +list_vhosts(User = #user{username = Username, auth_backend = Module}, + Permission) -> + lists:filter( + fun(VHost) -> + case Module:check_vhost_access(User, VHost, Permission) of + {error, _} = E -> + rabbit_log:warning("~w failed checking vhost access " + "to ~s for ~s: ~p~n", + [Module, VHost, Username, E]), + false; + Else -> + Else + end + end, rabbit_vhost:list()). diff -Nru rabbitmq-server-1.7.2/src/rabbit_alarm.erl rabbitmq-server-2.3.1/src/rabbit_alarm.erl --- rabbitmq-server-1.7.2/src/rabbit_alarm.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_alarm.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_alarm). @@ -47,7 +32,7 @@ -type(mfa_tuple() :: {atom(), atom(), list()}). -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(register/2 :: (pid(), mfa_tuple()) -> 'ok'). +-spec(register/2 :: (pid(), mfa_tuple()) -> boolean()). -endif. @@ -57,10 +42,9 @@ ok = alarm_handler:add_alarm_handler(?MODULE, []), {ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark), ok = case MemoryWatermark == 0 of - true -> - ok; - false -> - rabbit_sup:start_child(vm_memory_monitor, [MemoryWatermark]) + true -> ok; + false -> rabbit_sup:start_restartable_child(vm_memory_monitor, + [MemoryWatermark]) end, ok. @@ -68,9 +52,9 @@ ok = alarm_handler:delete_alarm_handler(?MODULE). 
register(Pid, HighMemMFA) -> - ok = gen_event:call(alarm_handler, ?MODULE, - {register, Pid, HighMemMFA}, - infinity). + gen_event:call(alarm_handler, ?MODULE, + {register, Pid, HighMemMFA}, + infinity). %%---------------------------------------------------------------------------- @@ -85,7 +69,8 @@ false -> ok end, NewAlertees = dict:store(Pid, HighMemMFA, Alertess), - {ok, ok, State#alarms{alertees = NewAlertees}}; + {ok, State#alarms.vm_memory_high_watermark, + State#alarms{alertees = NewAlertees}}; handle_call(_Request, State) -> {ok, not_understood, State}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_amqqueue.erl rabbitmq-server-2.3.1/src/rabbit_amqqueue.erl --- rabbitmq-server-1.7.2/src/rabbit_amqqueue.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_amqqueue.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,196 +1,231 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_amqqueue). --export([start/0, recover/0, declare/4, delete/3, purge/1]). --export([internal_declare/2, internal_delete/1]). +-export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). +-export([internal_declare/2, internal_delete/1, + maybe_run_queue_via_backing_queue/2, + maybe_run_queue_via_backing_queue_async/2, + sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, + set_maximum_since_use/2, maybe_expire/1, drop_expired/1]). -export([pseudo_queue/2]). --export([lookup/1, with/2, with_or_die/2, - stat/1, stat_all/0, deliver/2, redeliver/2, requeue/3, ack/4]). 
+-export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, + check_exclusive_access/2, with_exclusive_access_or_die/3, + stat/1, deliver/2, requeue/3, ack/4, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). +-export([emit_stats/1]). -export([consumers/1, consumers_all/1]). --export([claim_queue/2]). --export([basic_get/3, basic_consume/8, basic_cancel/4]). --export([notify_sent/2, unblock/2]). --export([commit_all/2, rollback_all/2, notify_down_all/2, limit_all/3]). +-export([basic_get/3, basic_consume/7, basic_cancel/4]). +-export([notify_sent/2, unblock/2, flush_all/2]). +-export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). -export([on_node_down/1]). --import(mnesia). --import(gen_server2). --import(lists). --import(queue). - -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). +-define(INTEGER_ARG_TYPES, [byte, short, signedint, long]). + %%---------------------------------------------------------------------------- -ifdef(use_specs). --type(qstats() :: {'ok', queue_name(), non_neg_integer(), non_neg_integer()}). --type(qlen() :: {'ok', non_neg_integer()}). --type(qfun(A) :: fun ((amqqueue()) -> A)). +-export_type([name/0, qmsg/0]). + +-type(name() :: rabbit_types:r('queue')). + +-type(qlen() :: rabbit_types:ok(non_neg_integer())). +-type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A)). +-type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}). +-type(msg_id() :: non_neg_integer()). -type(ok_or_errors() :: 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}). +-type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). + -spec(start/0 :: () -> 'ok'). --spec(recover/0 :: () -> 'ok'). --spec(declare/4 :: (queue_name(), boolean(), boolean(), amqp_table()) -> - amqqueue()). --spec(lookup/1 :: (queue_name()) -> {'ok', amqqueue()} | not_found()). --spec(with/2 :: (queue_name(), qfun(A)) -> A | not_found()). --spec(with_or_die/2 :: (queue_name(), qfun(A)) -> A). --spec(list/1 :: (vhost()) -> [amqqueue()]). --spec(info_keys/0 :: () -> [info_key()]). --spec(info/1 :: (amqqueue()) -> [info()]). --spec(info/2 :: (amqqueue(), [info_key()]) -> [info()]). --spec(info_all/1 :: (vhost()) -> [[info()]]). --spec(info_all/2 :: (vhost(), [info_key()]) -> [[info()]]). --spec(consumers/1 :: (amqqueue()) -> [{pid(), ctag(), boolean()}]). +-spec(stop/0 :: () -> 'ok'). +-spec(declare/5 :: + (name(), boolean(), boolean(), + rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) + -> {'new' | 'existing', rabbit_types:amqqueue()} | + rabbit_types:channel_exit()). +-spec(lookup/1 :: + (name()) -> rabbit_types:ok(rabbit_types:amqqueue()) | + rabbit_types:error('not_found')). +-spec(with/2 :: (name(), qfun(A)) -> A | rabbit_types:error('not_found')). +-spec(with_or_die/2 :: + (name(), qfun(A)) -> A | rabbit_types:channel_exit()). +-spec(assert_equivalence/5 :: + (rabbit_types:amqqueue(), boolean(), boolean(), + rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) + -> 'ok' | rabbit_types:channel_exit() | + rabbit_types:connection_exit()). +-spec(check_exclusive_access/2 :: + (rabbit_types:amqqueue(), pid()) + -> 'ok' | rabbit_types:channel_exit()). +-spec(with_exclusive_access_or_die/3 :: + (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()). +-spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]). +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()). 
+-spec(info/2 :: + (rabbit_types:amqqueue(), rabbit_types:info_keys()) + -> rabbit_types:infos()). +-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). +-spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys()) + -> [rabbit_types:infos()]). +-spec(consumers/1 :: + (rabbit_types:amqqueue()) + -> [{pid(), rabbit_types:ctag(), boolean()}]). -spec(consumers_all/1 :: - (vhost()) -> [{queue_name(), pid(), ctag(), boolean()}]). --spec(stat/1 :: (amqqueue()) -> qstats()). --spec(stat_all/0 :: () -> [qstats()]). + (rabbit_types:vhost()) + -> [{name(), pid(), rabbit_types:ctag(), boolean()}]). +-spec(stat/1 :: + (rabbit_types:amqqueue()) + -> {'ok', non_neg_integer(), non_neg_integer()}). +-spec(emit_stats/1 :: (rabbit_types:amqqueue()) -> 'ok'). +-spec(delete_immediately/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(delete/3 :: - (amqqueue(), 'false', 'false') -> qlen(); - (amqqueue(), 'true' , 'false') -> qlen() | {'error', 'in_use'}; - (amqqueue(), 'false', 'true' ) -> qlen() | {'error', 'not_empty'}; - (amqqueue(), 'true' , 'true' ) -> qlen() | - {'error', 'in_use'} | - {'error', 'not_empty'}). --spec(purge/1 :: (amqqueue()) -> qlen()). --spec(deliver/2 :: (pid(), delivery()) -> boolean()). --spec(redeliver/2 :: (pid(), [{message(), boolean()}]) -> 'ok'). + (rabbit_types:amqqueue(), 'false', 'false') + -> qlen(); + (rabbit_types:amqqueue(), 'true' , 'false') + -> qlen() | rabbit_types:error('in_use'); + (rabbit_types:amqqueue(), 'false', 'true' ) + -> qlen() | rabbit_types:error('not_empty'); + (rabbit_types:amqqueue(), 'true' , 'true' ) + -> qlen() | + rabbit_types:error('in_use') | + rabbit_types:error('not_empty')). +-spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()). +-spec(deliver/2 :: (pid(), rabbit_types:delivery()) -> boolean()). -spec(requeue/3 :: (pid(), [msg_id()], pid()) -> 'ok'). --spec(ack/4 :: (pid(), maybe(txn()), [msg_id()], pid()) -> 'ok'). --spec(commit_all/2 :: ([pid()], txn()) -> ok_or_errors()). --spec(rollback_all/2 :: ([pid()], txn()) -> ok_or_errors()). +-spec(ack/4 :: + (pid(), rabbit_types:maybe(rabbit_types:txn()), [msg_id()], pid()) + -> 'ok'). +-spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok'). +-spec(commit_all/3 :: ([pid()], rabbit_types:txn(), pid()) -> ok_or_errors()). +-spec(rollback_all/3 :: ([pid()], rabbit_types:txn(), pid()) -> 'ok'). -spec(notify_down_all/2 :: ([pid()], pid()) -> ok_or_errors()). -spec(limit_all/3 :: ([pid()], pid(), pid() | 'undefined') -> ok_or_errors()). --spec(claim_queue/2 :: (amqqueue(), pid()) -> 'ok' | 'locked'). --spec(basic_get/3 :: (amqqueue(), pid(), boolean()) -> - {'ok', non_neg_integer(), msg()} | 'empty'). --spec(basic_consume/8 :: - (amqqueue(), boolean(), pid(), pid(), pid() | 'undefined', ctag(), - boolean(), any()) -> - 'ok' | {'error', 'queue_owned_by_another_connection' | - 'exclusive_consume_unavailable'}). --spec(basic_cancel/4 :: (amqqueue(), pid(), ctag(), any()) -> 'ok'). +-spec(basic_get/3 :: (rabbit_types:amqqueue(), pid(), boolean()) -> + {'ok', non_neg_integer(), qmsg()} | 'empty'). +-spec(basic_consume/7 :: + (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined', + rabbit_types:ctag(), boolean(), any()) + -> rabbit_types:ok_or_error('exclusive_consume_unavailable')). +-spec(basic_cancel/4 :: + (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok'). -spec(notify_sent/2 :: (pid(), pid()) -> 'ok'). -spec(unblock/2 :: (pid(), pid()) -> 'ok'). --spec(internal_declare/2 :: (amqqueue(), boolean()) -> amqqueue()). 
--spec(internal_delete/1 :: (queue_name()) -> 'ok' | not_found()). --spec(on_node_down/1 :: (erlang_node()) -> 'ok'). --spec(pseudo_queue/2 :: (binary(), pid()) -> amqqueue()). +-spec(flush_all/2 :: ([pid()], pid()) -> 'ok'). +-spec(internal_declare/2 :: + (rabbit_types:amqqueue(), boolean()) + -> queue_or_not_found() | rabbit_misc:thunk(queue_or_not_found())). +-spec(internal_delete/1 :: + (name()) -> rabbit_types:ok_or_error('not_found') | + rabbit_types:connection_exit() | + fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | + rabbit_types:connection_exit())). +-spec(maybe_run_queue_via_backing_queue/2 :: + (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). +-spec(maybe_run_queue_via_backing_queue_async/2 :: + (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). +-spec(sync_timeout/1 :: (pid()) -> 'ok'). +-spec(update_ram_duration/1 :: (pid()) -> 'ok'). +-spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). +-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). +-spec(maybe_expire/1 :: (pid()) -> 'ok'). +-spec(on_node_down/1 :: (node()) -> 'ok'). +-spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()). -endif. %%---------------------------------------------------------------------------- start() -> + DurableQueues = find_durable_queues(), + {ok, BQ} = application:get_env(rabbit, backing_queue_module), + ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]), {ok,_} = supervisor:start_child( rabbit_sup, {rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []}, transient, infinity, supervisor, [rabbit_amqqueue_sup]}), + _RealDurableQueues = recover_durable_queues(DurableQueues), ok. -recover() -> - ok = recover_durable_queues(), - ok. +stop() -> + ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup), + ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup), + {ok, BQ} = application:get_env(rabbit, backing_queue_module), + ok = BQ:stop(). -recover_durable_queues() -> +find_durable_queues() -> Node = node(), - lists:foreach( - fun (RecoveredQ) -> - Q = start_queue_process(RecoveredQ), - %% We need to catch the case where a client connected to - %% another node has deleted the queue (and possibly - %% re-created it). - case rabbit_misc:execute_mnesia_transaction( - fun () -> case mnesia:match_object( - rabbit_durable_queue, RecoveredQ, read) of - [_] -> ok = store_queue(Q), - true; - [] -> false - end - end) of - true -> ok; - false -> exit(Q#amqqueue.pid, shutdown) - end - end, - %% TODO: use dirty ops instead - rabbit_misc:execute_mnesia_transaction( - fun () -> - qlc:e(qlc:q([Q || Q = #amqqueue{pid = Pid} - <- mnesia:table(rabbit_durable_queue), - node(Pid) == Node])) - end)), - ok. + %% TODO: use dirty ops instead + rabbit_misc:execute_mnesia_transaction( + fun () -> + qlc:e(qlc:q([Q || Q = #amqqueue{pid = Pid} + <- mnesia:table(rabbit_durable_queue), + node(Pid) == Node])) + end). -declare(QueueName, Durable, AutoDelete, Args) -> +recover_durable_queues(DurableQueues) -> + Qs = [start_queue_process(Q) || Q <- DurableQueues], + [Q || Q <- Qs, + gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == Q]. + +declare(QueueName, Durable, AutoDelete, Args, Owner) -> + ok = check_declare_arguments(QueueName, Args), Q = start_queue_process(#amqqueue{name = QueueName, durable = Durable, auto_delete = AutoDelete, arguments = Args, + exclusive_owner = Owner, pid = none}), - internal_declare(Q, true). 
- -internal_declare(Q = #amqqueue{name = QueueName}, WantDefaultBinding) -> - case rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:wread({rabbit_queue, QueueName}) of - [] -> - case mnesia:read( - {rabbit_durable_queue, QueueName}) of - [] -> ok = store_queue(Q), - case WantDefaultBinding of - true -> add_default_binding(Q); - false -> ok - end, - Q; - [_] -> not_found %% existing Q on stopped node - end; - [ExistingQ] -> - ExistingQ - end - end) of - not_found -> exit(Q#amqqueue.pid, shutdown), - rabbit_misc:not_found(QueueName); - Q -> Q; - ExistingQ -> exit(Q#amqqueue.pid, shutdown), - ExistingQ + case gen_server2:call(Q#amqqueue.pid, {init, false}) of + not_found -> rabbit_misc:not_found(QueueName); + Q1 -> Q1 end. +internal_declare(Q, true) -> + rabbit_misc:execute_mnesia_tx_with_tail( + fun () -> ok = store_queue(Q), rabbit_misc:const(Q) end); +internal_declare(Q = #amqqueue{name = QueueName}, false) -> + rabbit_misc:execute_mnesia_tx_with_tail( + fun () -> + case mnesia:wread({rabbit_queue, QueueName}) of + [] -> + case mnesia:read({rabbit_durable_queue, QueueName}) of + [] -> ok = store_queue(Q), + B = add_default_binding(Q), + fun (Tx) -> B(Tx), Q end; + [_] -> %% Q exists on stopped node + rabbit_misc:const(not_found) + end; + [ExistingQ = #amqqueue{pid = QPid}] -> + case is_process_alive(QPid) of + true -> rabbit_misc:const(ExistingQ); + false -> TailFun = internal_delete(QueueName), + fun (Tx) -> TailFun(Tx), ExistingQ end + end + end + end). + store_queue(Q = #amqqueue{durable = true}) -> ok = mnesia:write(rabbit_durable_queue, Q, write), ok = mnesia:write(rabbit_queue, Q, write), @@ -200,14 +235,16 @@ ok. start_queue_process(Q) -> - {ok, Pid} = supervisor:start_child(rabbit_amqqueue_sup, [Q]), + {ok, Pid} = rabbit_amqqueue_sup:start_child([Q]), Q#amqqueue{pid = Pid}. add_default_binding(#amqqueue{name = QueueName}) -> - Exchange = rabbit_misc:r(QueueName, exchange, <<>>), + ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>), RoutingKey = QueueName#resource.name, - rabbit_exchange:add_binding(Exchange, QueueName, RoutingKey, []), - ok. + rabbit_binding:add(#binding{source = ExchangeName, + destination = QueueName, + key = RoutingKey, + args = []}). lookup(Name) -> rabbit_misc:dirty_read({rabbit_queue, Name}). @@ -223,6 +260,70 @@ with_or_die(Name, F) -> with(Name, F, fun () -> rabbit_misc:not_found(Name) end). +assert_equivalence(#amqqueue{durable = Durable, + auto_delete = AutoDelete} = Q, + Durable, AutoDelete, RequiredArgs, Owner) -> + assert_args_equivalence(Q, RequiredArgs), + check_exclusive_access(Q, Owner, strict); +assert_equivalence(#amqqueue{name = QueueName}, + _Durable, _AutoDelete, _RequiredArgs, _Owner) -> + rabbit_misc:protocol_error( + precondition_failed, "parameters for ~s not equivalent", + [rabbit_misc:rs(QueueName)]). + +check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax). + +check_exclusive_access(#amqqueue{exclusive_owner = Owner}, Owner, _MatchType) -> + ok; +check_exclusive_access(#amqqueue{exclusive_owner = none}, _ReaderPid, lax) -> + ok; +check_exclusive_access(#amqqueue{name = QueueName}, _ReaderPid, _MatchType) -> + rabbit_misc:protocol_error( + resource_locked, + "cannot obtain exclusive access to locked ~s", + [rabbit_misc:rs(QueueName)]). + +with_exclusive_access_or_die(Name, ReaderPid, F) -> + with_or_die(Name, + fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end). 
+ +assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, + RequiredArgs) -> + rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName, + [<<"x-expires">>]). + +check_declare_arguments(QueueName, Args) -> + [case Fun(rabbit_misc:table_lookup(Args, Key)) of + ok -> ok; + {error, Error} -> rabbit_misc:protocol_error( + precondition_failed, + "invalid arg '~s' for ~s: ~w", + [Key, rabbit_misc:rs(QueueName), Error]) + end || {Key, Fun} <- + [{<<"x-expires">>, fun check_expires_argument/1}, + {<<"x-message-ttl">>, fun check_message_ttl_argument/1}]], + ok. + +check_expires_argument(Val) -> + check_integer_argument(Val, + expires_not_of_acceptable_type, + expires_zero_or_less). + +check_message_ttl_argument(Val) -> + check_integer_argument(Val, + ttl_not_of_acceptable_type, + ttl_zero_or_less). + +check_integer_argument(undefined, _, _) -> + ok; +check_integer_argument({Type, Val}, InvalidTypeError, _) when Val > 0 -> + case lists:member(Type, ?INTEGER_ARG_TYPES) of + true -> ok; + false -> {error, {InvalidTypeError, Type, Val}} + end; +check_integer_argument({_Type, _Val}, _, ZeroOrLessError) -> + {error, ZeroOrLessError}. + list(VHostPath) -> mnesia:dirty_match_object( rabbit_queue, @@ -233,10 +334,10 @@ map(VHostPath, F) -> rabbit_misc:filter_exit_map(F, list(VHostPath)). info(#amqqueue{ pid = QPid }) -> - gen_server2:pcall(QPid, 9, info, infinity). + delegate_call(QPid, info, infinity). info(#amqqueue{ pid = QPid }, Items) -> - case gen_server2:pcall(QPid, 9, {info, Items}, infinity) of + case delegate_call(QPid, {info, Items}, infinity) of {ok, Res} -> Res; {error, Error} -> throw(Error) end. @@ -246,123 +347,151 @@ info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end). consumers(#amqqueue{ pid = QPid }) -> - gen_server2:pcall(QPid, 9, consumers, infinity). + delegate_call(QPid, consumers, infinity). consumers_all(VHostPath) -> - lists:concat( + lists:append( map(VHostPath, fun (Q) -> [{Q#amqqueue.name, ChPid, ConsumerTag, AckRequired} || {ChPid, ConsumerTag, AckRequired} <- consumers(Q)] end)). -stat(#amqqueue{pid = QPid}) -> gen_server2:call(QPid, stat, infinity). +stat(#amqqueue{pid = QPid}) -> delegate_call(QPid, stat, infinity). + +emit_stats(#amqqueue{pid = QPid}) -> + delegate_cast(QPid, emit_stats). -stat_all() -> - lists:map(fun stat/1, rabbit_misc:dirty_read_all(rabbit_queue)). +delete_immediately(#amqqueue{ pid = QPid }) -> + gen_server2:cast(QPid, delete_immediately). delete(#amqqueue{ pid = QPid }, IfUnused, IfEmpty) -> - gen_server2:call(QPid, {delete, IfUnused, IfEmpty}, infinity). + delegate_call(QPid, {delete, IfUnused, IfEmpty}, infinity). -purge(#amqqueue{ pid = QPid }) -> gen_server2:call(QPid, purge, infinity). +purge(#amqqueue{ pid = QPid }) -> delegate_call(QPid, purge, infinity). 
-deliver(QPid, #delivery{immediate = true, - txn = Txn, sender = ChPid, message = Message}) -> - gen_server2:call(QPid, {deliver_immediately, Txn, Message, ChPid}, - infinity); -deliver(QPid, #delivery{mandatory = true, - txn = Txn, sender = ChPid, message = Message}) -> - gen_server2:call(QPid, {deliver, Txn, Message, ChPid}, infinity), +deliver(QPid, Delivery = #delivery{immediate = true}) -> + gen_server2:call(QPid, {deliver_immediately, Delivery}, infinity); +deliver(QPid, Delivery = #delivery{mandatory = true}) -> + gen_server2:call(QPid, {deliver, Delivery}, infinity), true; -deliver(QPid, #delivery{txn = Txn, sender = ChPid, message = Message}) -> - gen_server2:cast(QPid, {deliver, Txn, Message, ChPid}), +deliver(QPid, Delivery) -> + gen_server2:cast(QPid, {deliver, Delivery}), true. -redeliver(QPid, Messages) -> - gen_server2:cast(QPid, {redeliver, Messages}). - requeue(QPid, MsgIds, ChPid) -> - gen_server2:cast(QPid, {requeue, MsgIds, ChPid}). + delegate_call(QPid, {requeue, MsgIds, ChPid}, infinity). ack(QPid, Txn, MsgIds, ChPid) -> - gen_server2:pcast(QPid, 8, {ack, Txn, MsgIds, ChPid}). + delegate_cast(QPid, {ack, Txn, MsgIds, ChPid}). -commit_all(QPids, Txn) -> - safe_pmap_ok( - fun (QPid) -> exit({queue_disappeared, QPid}) end, - fun (QPid) -> gen_server2:call(QPid, {commit, Txn}, infinity) end, - QPids). +reject(QPid, MsgIds, Requeue, ChPid) -> + delegate_cast(QPid, {reject, MsgIds, Requeue, ChPid}). -rollback_all(QPids, Txn) -> - safe_pmap_ok( - fun (QPid) -> exit({queue_disappeared, QPid}) end, - fun (QPid) -> gen_server2:cast(QPid, {rollback, Txn}) end, +commit_all(QPids, Txn, ChPid) -> + safe_delegate_call_ok( + fun (QPid) -> gen_server2:call(QPid, {commit, Txn, ChPid}, infinity) end, QPids). +rollback_all(QPids, Txn, ChPid) -> + delegate:invoke_no_result( + QPids, fun (QPid) -> gen_server2:cast(QPid, {rollback, Txn, ChPid}) end). + notify_down_all(QPids, ChPid) -> - safe_pmap_ok( - %% we don't care if the queue process has terminated in the - %% meantime - fun (_) -> ok end, + safe_delegate_call_ok( fun (QPid) -> gen_server2:call(QPid, {notify_down, ChPid}, infinity) end, QPids). limit_all(QPids, ChPid, LimiterPid) -> - safe_pmap_ok( - fun (_) -> ok end, - fun (QPid) -> gen_server2:cast(QPid, {limit, ChPid, LimiterPid}) end, - QPids). - -claim_queue(#amqqueue{pid = QPid}, ReaderPid) -> - gen_server2:call(QPid, {claim_queue, ReaderPid}, infinity). + delegate:invoke_no_result( + QPids, fun (QPid) -> + gen_server2:cast(QPid, {limit, ChPid, LimiterPid}) + end). basic_get(#amqqueue{pid = QPid}, ChPid, NoAck) -> - gen_server2:call(QPid, {basic_get, ChPid, NoAck}, infinity). + delegate_call(QPid, {basic_get, ChPid, NoAck}, infinity). -basic_consume(#amqqueue{pid = QPid}, NoAck, ReaderPid, ChPid, LimiterPid, +basic_consume(#amqqueue{pid = QPid}, NoAck, ChPid, LimiterPid, ConsumerTag, ExclusiveConsume, OkMsg) -> - gen_server2:call(QPid, {basic_consume, NoAck, ReaderPid, ChPid, - LimiterPid, ConsumerTag, ExclusiveConsume, OkMsg}, - infinity). + delegate_call(QPid, {basic_consume, NoAck, ChPid, + LimiterPid, ConsumerTag, ExclusiveConsume, OkMsg}, + infinity). basic_cancel(#amqqueue{pid = QPid}, ChPid, ConsumerTag, OkMsg) -> - ok = gen_server2:call(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}, - infinity). + ok = delegate_call(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}, + infinity). notify_sent(QPid, ChPid) -> - gen_server2:pcast(QPid, 8, {notify_sent, ChPid}). + delegate_cast(QPid, {notify_sent, ChPid}). 
unblock(QPid, ChPid) -> - gen_server2:pcast(QPid, 8, {unblock, ChPid}). + delegate_cast(QPid, {unblock, ChPid}). + +flush_all(QPids, ChPid) -> + delegate:invoke_no_result( + QPids, fun (QPid) -> gen_server2:cast(QPid, {flush, ChPid}) end). + +internal_delete1(QueueName) -> + ok = mnesia:delete({rabbit_queue, QueueName}), + ok = mnesia:delete({rabbit_durable_queue, QueueName}), + %% we want to execute some things, as decided by rabbit_exchange, + %% after the transaction. + rabbit_binding:remove_for_destination(QueueName). internal_delete(QueueName) -> - rabbit_misc:execute_mnesia_transaction( + rabbit_misc:execute_mnesia_tx_with_tail( fun () -> case mnesia:wread({rabbit_queue, QueueName}) of - [] -> {error, not_found}; - [_] -> - ok = rabbit_exchange:delete_queue_bindings(QueueName), - ok = mnesia:delete({rabbit_queue, QueueName}), - ok = mnesia:delete({rabbit_durable_queue, QueueName}), - ok + [] -> rabbit_misc:const({error, not_found}); + [_] -> Deletions = internal_delete1(QueueName), + fun (Tx) -> ok = rabbit_binding:process_deletions( + Deletions, Tx) + end end end). +maybe_run_queue_via_backing_queue(QPid, Fun) -> + gen_server2:call(QPid, {maybe_run_queue_via_backing_queue, Fun}, infinity). + +maybe_run_queue_via_backing_queue_async(QPid, Fun) -> + gen_server2:cast(QPid, {maybe_run_queue_via_backing_queue, Fun}). + +sync_timeout(QPid) -> + gen_server2:cast(QPid, sync_timeout). + +update_ram_duration(QPid) -> + gen_server2:cast(QPid, update_ram_duration). + +set_ram_duration_target(QPid, Duration) -> + gen_server2:cast(QPid, {set_ram_duration_target, Duration}). + +set_maximum_since_use(QPid, Age) -> + gen_server2:cast(QPid, {set_maximum_since_use, Age}). + +maybe_expire(QPid) -> + gen_server2:cast(QPid, maybe_expire). + +drop_expired(QPid) -> + gen_server2:cast(QPid, drop_expired). + on_node_down(Node) -> rabbit_misc:execute_mnesia_transaction( - fun () -> - qlc:fold( - fun (QueueName, Acc) -> - ok = rabbit_exchange:delete_transient_queue_bindings( - QueueName), - ok = mnesia:delete({rabbit_queue, QueueName}), - Acc - end, - ok, - qlc:q([QueueName || #amqqueue{name = QueueName, pid = Pid} - <- mnesia:table(rabbit_queue), - node(Pid) == Node])) + fun () -> qlc:e(qlc:q([delete_queue(QueueName) || + #amqqueue{name = QueueName, pid = Pid} + <- mnesia:table(rabbit_queue), + node(Pid) == Node])) + end, + fun (Deletions, Tx) -> + rabbit_binding:process_deletions( + lists:foldl(fun rabbit_binding:combine_deletions/2, + rabbit_binding:new_deletions(), + Deletions), + Tx) end). +delete_queue(QueueName) -> + ok = mnesia:delete({rabbit_queue, QueueName}), + rabbit_binding:remove_transient_for_destination(QueueName). + pseudo_queue(QueueName, Pid) -> #amqqueue{name = QueueName, durable = false, @@ -370,17 +499,18 @@ arguments = [], pid = Pid}. -safe_pmap_ok(H, F, L) -> - case [R || R <- rabbit_misc:upmap( - fun (V) -> - try - rabbit_misc:with_exit_handler( - fun () -> H(V) end, - fun () -> F(V) end) - catch Class:Reason -> {Class, Reason} - end - end, L), - R =/= ok] of - [] -> ok; - Errors -> {error, Errors} +safe_delegate_call_ok(F, Pids) -> + case delegate:invoke(Pids, fun (Pid) -> + rabbit_misc:with_exit_handler( + fun () -> ok end, + fun () -> F(Pid) end) + end) of + {_, []} -> ok; + {_, Bad} -> {error, Bad} end. + +delegate_call(Pid, Msg, Timeout) -> + delegate:invoke(Pid, fun (P) -> gen_server2:call(P, Msg, Timeout) end). + +delegate_cast(Pid, Msg) -> + delegate:invoke_no_result(Pid, fun (P) -> gen_server2:cast(P, Msg) end). 
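check_declare_arguments/2 above rejects the x-expires and x-message-ttl declare arguments unless they are strictly positive integers of one of the AMQP integer field types (byte, short, signedint, long); anything else fails the declare with precondition_failed. The sketch below is not from the diff: a hypothetical helper, assuming a running broker node and made-up names, that declares a durable queue whose arguments pass that validation.

-module(declare_args_example).
-export([declare_demo/0]).

%% Hypothetical helper for illustration only. Assumes it runs inside a
%% started rabbit node so that mnesia and the queue supervisor are up.
declare_demo() ->
    QName = rabbit_misc:r(<<"/">>, queue, <<"ttl-demo">>),
    Args  = [{<<"x-expires">>,     long, 1800000},  %% remove the queue after 30 min unused
             {<<"x-message-ttl">>, long, 60000}],   %% expire messages after 60 seconds
    %% declare/5: name, durable, auto-delete, arguments, exclusive owner pid or none
    {new, Q} = rabbit_amqqueue:declare(QName, true, false, Args, none),
    Q.

A zero or negative value, or a non-integer field type, would instead be reported through check_integer_argument/3 as a precondition_failed protocol error.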
diff -Nru rabbitmq-server-1.7.2/src/rabbit_amqqueue_process.erl rabbitmq-server-2.3.1/src/rabbit_amqqueue_process.erl --- rabbitmq-server-1.7.2/src/rabbit_amqqueue_process.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_amqqueue_process.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_amqqueue_process). @@ -35,96 +20,258 @@ -behaviour(gen_server2). --define(UNSENT_MESSAGE_LIMIT, 100). --define(HIBERNATE_AFTER_MIN, 1000). --define(DESIRED_HIBERNATE, 10000). +-define(UNSENT_MESSAGE_LIMIT, 100). +-define(SYNC_INTERVAL, 5). %% milliseconds +-define(RAM_DURATION_UPDATE_INTERVAL, 5000). --export([start_link/1, info_keys/0]). +-define(BASE_MESSAGE_PROPERTIES, + #message_properties{expiry = undefined, needs_confirming = false}). --export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]). +-export([start_link/1, info_keys/0]). --import(queue). --import(erlang). --import(lists). +-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, + handle_info/2, handle_pre_hibernate/1, prioritise_call/3, + prioritise_cast/2, prioritise_info/2]). % Queue's state -record(q, {q, - owner, exclusive_consumer, has_had_consumers, - next_msg_id, - message_buffer, + backing_queue, + backing_queue_state, active_consumers, - blocked_consumers}). + blocked_consumers, + expires, + sync_timer_ref, + rate_timer_ref, + expiry_timer_ref, + stats_timer, + guid_to_channel, + ttl, + ttl_timer_ref + }). -record(consumer, {tag, ack_required}). 
--record(tx, {ch_pid, is_persistent, pending_messages, pending_acks}). - %% These are held in our process dictionary -record(cr, {consumer_count, ch_pid, limiter_pid, monitor_ref, - unacked_messages, + acktags, is_limit_active, txn, unsent_message_count}). --define(INFO_KEYS, - [name, - durable, - auto_delete, - arguments, - pid, - owner_pid, +-define(STATISTICS_KEYS, + [pid, exclusive_consumer_pid, exclusive_consumer_tag, messages_ready, messages_unacknowledged, - messages_uncommitted, messages, - acks_uncommitted, consumers, - transactions, - memory]). + memory, + backing_queue_status + ]). + +-define(CREATION_EVENT_KEYS, + [pid, + name, + durable, + auto_delete, + arguments, + owner_pid + ]). + +-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]). %%---------------------------------------------------------------------------- start_link(Q) -> gen_server2:start_link(?MODULE, Q, []). info_keys() -> ?INFO_KEYS. - + %%---------------------------------------------------------------------------- init(Q) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), - {ok, #q{q = Q, - owner = none, - exclusive_consumer = none, - has_had_consumers = false, - next_msg_id = 1, - message_buffer = queue:new(), - active_consumers = queue:new(), - blocked_consumers = queue:new()}, hibernate, + process_flag(trap_exit, true), + {ok, BQ} = application:get_env(backing_queue_module), + + {ok, #q{q = Q#amqqueue{pid = self()}, + exclusive_consumer = none, + has_had_consumers = false, + backing_queue = BQ, + backing_queue_state = undefined, + active_consumers = queue:new(), + blocked_consumers = queue:new(), + expires = undefined, + sync_timer_ref = undefined, + rate_timer_ref = undefined, + expiry_timer_ref = undefined, + ttl = undefined, + stats_timer = rabbit_event:init_stats_timer(), + guid_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -terminate(_Reason, State) -> +terminate(shutdown, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); +terminate({shutdown, _}, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); +terminate(_Reason, State = #q{backing_queue = BQ}) -> %% FIXME: How do we cancel active subscriptions? - QName = qname(State), - lists:foreach(fun (Txn) -> ok = rollback_work(Txn, QName) end, - all_tx()), - ok = purge_message_buffer(QName, State#q.message_buffer), - ok = rabbit_amqqueue:internal_delete(QName). + terminate_shutdown(fun (BQS) -> + BQS1 = BQ:delete_and_terminate(BQS), + %% don't care if the internal delete + %% doesn't return 'ok'. + rabbit_amqqueue:internal_delete(qname(State)), + BQS1 + end, State). code_change(_OldVsn, State, _Extra) -> {ok, State}. %%---------------------------------------------------------------------------- -reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}. 
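init/1 above no longer owns the message store directly; it reads the backing queue implementation from the application environment. A minimal configuration sketch, assuming the module name rabbit_variable_queue (the value the server ships with; the name does not appear in this hunk):

    %% e.g. in a config file loaded into the rabbit application environment
    [{rabbit, [{backing_queue_module, rabbit_variable_queue}]}].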
+declare(Recover, From, + State = #q{q = Q = #amqqueue{name = QName, durable = IsDurable}, + backing_queue = BQ, backing_queue_state = undefined, + stats_timer = StatsTimer}) -> + case rabbit_amqqueue:internal_declare(Q, Recover) of + not_found -> {stop, normal, not_found, State}; + Q -> gen_server2:reply(From, {new, Q}), + ok = file_handle_cache:register_callback( + rabbit_amqqueue, set_maximum_since_use, + [self()]), + ok = rabbit_memory_monitor:register( + self(), {rabbit_amqqueue, + set_ram_duration_target, [self()]}), + BQS = BQ:init(QName, IsDurable, Recover), + State1 = process_args(State#q{backing_queue_state = BQS}), + rabbit_event:notify(queue_created, + infos(?CREATION_EVENT_KEYS, State1)), + rabbit_event:if_enabled(StatsTimer, + fun() -> emit_stats(State1) end), + noreply(State1); + Q1 -> {stop, normal, {existing, Q1}, State} + end. + +process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> + lists:foldl(fun({Arg, Fun}, State1) -> + case rabbit_misc:table_lookup(Arguments, Arg) of + {_Type, Val} -> Fun(Val, State1); + undefined -> State1 + end + end, State, [{<<"x-expires">>, fun init_expires/2}, + {<<"x-message-ttl">>, fun init_ttl/2}]). + +init_expires(Expires, State) -> ensure_expiry_timer(State#q{expires = Expires}). + +init_ttl(TTL, State) -> drop_expired_messages(State#q{ttl = TTL}). + +terminate_shutdown(Fun, State) -> + State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = + stop_sync_timer(stop_rate_timer(State)), + case BQS of + undefined -> State; + _ -> ok = rabbit_memory_monitor:deregister(self()), + BQS1 = lists:foldl( + fun (#cr{txn = none}, BQSN) -> + BQSN; + (#cr{txn = Txn}, BQSN) -> + {_AckTags, BQSN1} = + BQ:tx_rollback(Txn, BQSN), + BQSN1 + end, BQS, all_ch_record()), + [emit_consumer_deleted(Ch, CTag) + || {Ch, CTag, _} <- consumers(State1)], + rabbit_event:notify(queue_deleted, [{pid, self()}]), + State1#q{backing_queue_state = Fun(BQS1)} + end. + +reply(Reply, NewState) -> + assert_invariant(NewState), + {NewState1, Timeout} = next_state(NewState), + {reply, Reply, NewState1, Timeout}. + +noreply(NewState) -> + assert_invariant(NewState), + {NewState1, Timeout} = next_state(NewState), + {noreply, NewState1, Timeout}. + +next_state(State) -> + State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = + ensure_rate_timer(State), + State2 = ensure_stats_timer(State1), + case BQ:needs_idle_timeout(BQS) of + true -> {ensure_sync_timer(State2), 0}; + false -> {stop_sync_timer(State2), hibernate} + end. + +ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> + {ok, TRef} = timer:apply_after( + ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + State#q{sync_timer_ref = TRef}; +ensure_sync_timer(State) -> + State. + +stop_sync_timer(State = #q{sync_timer_ref = undefined}) -> + State; +stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#q{sync_timer_ref = undefined}. + +ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> + {ok, TRef} = timer:apply_after( + ?RAM_DURATION_UPDATE_INTERVAL, + rabbit_amqqueue, update_ram_duration, + [self()]), + State#q{rate_timer_ref = TRef}; +ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) -> + State#q{rate_timer_ref = undefined}; +ensure_rate_timer(State) -> + State. 
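process_args/1 above turns two optional queue.declare arguments into queue state: x-expires (idle-queue lease, in milliseconds) and x-message-ttl (per-message time to live, in milliseconds). A client-side sketch of declaring such a queue with the separate RabbitMQ Erlang client, whose #'queue.declare'{} record and amqp_channel:call/2 are assumed here; the queue name and values are arbitrary:

    Declare = #'queue.declare'{
                 queue     = <<"cache_q">>,
                 arguments = [{<<"x-expires">>,     long, 1800000},  %% drop the queue after 30 min unused
                              {<<"x-message-ttl">>, long, 60000}]},  %% expire messages after 60 s
    #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare),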
+ +stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> + State; +stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> + State#q{rate_timer_ref = undefined}; +stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#q{rate_timer_ref = undefined}. + +stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> + State; +stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#q{expiry_timer_ref = undefined}. + +%% We wish to expire only when there are no consumers *and* the expiry +%% hasn't been refreshed (by queue.declare or basic.get) for the +%% configured period. +ensure_expiry_timer(State = #q{expires = undefined}) -> + State; +ensure_expiry_timer(State = #q{expires = Expires}) -> + case is_unused(State) of + true -> + NewState = stop_expiry_timer(State), + {ok, TRef} = timer:apply_after( + Expires, rabbit_amqqueue, maybe_expire, [self()]), + NewState#q{expiry_timer_ref = TRef}; + false -> + State + end. -noreply(NewState) -> {noreply, NewState, hibernate}. +ensure_stats_timer(State = #q{stats_timer = StatsTimer, + q = Q}) -> + State#q{stats_timer = rabbit_event:ensure_stats_timer( + StatsTimer, + fun() -> rabbit_amqqueue:emit_stats(Q) end)}. + +assert_invariant(#q{active_consumers = AC, + backing_queue = BQ, backing_queue_state = BQS}) -> + true = (queue:is_empty(AC) orelse BQ:is_empty(BQS)). lookup_ch(ChPid) -> case get({ch, ChPid}) of @@ -140,7 +287,7 @@ C = #cr{consumer_count = 0, ch_pid = ChPid, monitor_ref = MonitorRef, - unacked_messages = dict:new(), + acktags = sets:new(), is_limit_active = false, txn = none, unsent_message_count = 0}, @@ -152,6 +299,25 @@ store_ch_record(C = #cr{ch_pid = ChPid}) -> put({ch, ChPid}, C). +maybe_store_ch_record(C = #cr{consumer_count = ConsumerCount, + acktags = ChAckTags, + txn = Txn, + unsent_message_count = UnsentMessageCount}) -> + case {sets:size(ChAckTags), ConsumerCount, UnsentMessageCount, Txn} of + {0, 0, 0, none} -> ok = erase_ch_record(C), + false; + _ -> store_ch_record(C), + true + end. + +erase_ch_record(#cr{ch_pid = ChPid, + limiter_pid = LimiterPid, + monitor_ref = MonitorRef}) -> + ok = rabbit_limiter:unregister(LimiterPid, self()), + erlang:demonitor(MonitorRef), + erase({ch, ChPid}), + ok. + all_ch_record() -> [C || {{ch, _}, C} <- get()]. @@ -166,35 +332,34 @@ true -> ok end. -record_current_channel_tx(ChPid, Txn) -> - %% as a side effect this also starts monitoring the channel (if - %% that wasn't happening already) - store_ch_record((ch_record(ChPid))#cr{txn = Txn}). 
- -deliver_immediately(Message, IsDelivered, - State = #q{q = #amqqueue{name = QName}, - active_consumers = ActiveConsumers, - blocked_consumers = BlockedConsumers, - next_msg_id = NextId}) -> +deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, + State = #q{q = #amqqueue{name = QName}, + active_consumers = ActiveConsumers, + blocked_consumers = BlockedConsumers}) -> case queue:out(ActiveConsumers) of {{value, QEntry = {ChPid, #consumer{tag = ConsumerTag, ack_required = AckRequired}}}, ActiveConsumersTail} -> C = #cr{limiter_pid = LimiterPid, unsent_message_count = Count, - unacked_messages = UAM} = ch_record(ChPid), - case rabbit_limiter:can_send(LimiterPid, self(), AckRequired) of + acktags = ChAckTags} = ch_record(ChPid), + IsMsgReady = PredFun(FunAcc, State), + case (IsMsgReady andalso + rabbit_limiter:can_send( LimiterPid, self(), AckRequired )) of true -> + {{Message, IsDelivered, AckTag}, FunAcc1, State1} = + DeliverFun(AckRequired, FunAcc, State), rabbit_channel:deliver( ChPid, ConsumerTag, AckRequired, - {QName, self(), NextId, IsDelivered, Message}), - NewUAM = case AckRequired of - true -> dict:store(NextId, Message, UAM); - false -> UAM - end, + {QName, self(), AckTag, IsDelivered, Message}), + ChAckTags1 = + case AckRequired of + true -> sets:add_element(AckTag, ChAckTags); + false -> ChAckTags + end, NewC = C#cr{unsent_message_count = Count + 1, - unacked_messages = NewUAM}, - store_ch_record(NewC), + acktags = ChAckTags1}, + true = maybe_store_ch_record(NewC), {NewActiveConsumers, NewBlockedConsumers} = case ch_record_state_transition(C, NewC) of ok -> {queue:in(QEntry, ActiveConsumersTail), @@ -207,93 +372,177 @@ {ActiveConsumers1, queue:in(QEntry, BlockedConsumers1)} end, - {offered, AckRequired, - State#q{active_consumers = NewActiveConsumers, - blocked_consumers = NewBlockedConsumers, - next_msg_id = NextId + 1}}; - false -> - store_ch_record(C#cr{is_limit_active = true}), + State2 = State1#q{ + active_consumers = NewActiveConsumers, + blocked_consumers = NewBlockedConsumers}, + deliver_msgs_to_consumers(Funs, FunAcc1, State2); + %% if IsMsgReady then we've hit the limiter + false when IsMsgReady -> + true = maybe_store_ch_record(C#cr{is_limit_active = true}), {NewActiveConsumers, NewBlockedConsumers} = move_consumers(ChPid, ActiveConsumers, BlockedConsumers), - deliver_immediately( - Message, IsDelivered, + deliver_msgs_to_consumers( + Funs, FunAcc, State#q{active_consumers = NewActiveConsumers, - blocked_consumers = NewBlockedConsumers}) + blocked_consumers = NewBlockedConsumers}); + false -> + %% no message was ready, so we don't need to block anyone + {FunAcc, State} end; {empty, _} -> - {not_offered, State} + {FunAcc, State} end. -run_message_queue(State = #q{message_buffer = MessageBuffer}) -> - run_message_queue(MessageBuffer, State). +deliver_from_queue_pred(IsEmpty, _State) -> + not IsEmpty. -run_message_queue(MessageBuffer, State) -> - case queue:out(MessageBuffer) of - {{value, {Message, IsDelivered}}, BufferTail} -> - case deliver_immediately(Message, IsDelivered, State) of - {offered, true, NewState} -> - persist_delivery(qname(State), Message, IsDelivered), - run_message_queue(BufferTail, NewState); - {offered, false, NewState} -> - persist_auto_ack(qname(State), Message), - run_message_queue(BufferTail, NewState); - {not_offered, NewState} -> - NewState#q{message_buffer = MessageBuffer} - end; - {empty, _} -> - State#q{message_buffer = MessageBuffer} - end. 
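deliver_msgs_to_consumers/3 above is parameterised by a {PredFun, DeliverFun} pair so the same consumer round-robin loop can both drain the backing queue (run_message_queue) and serve direct deliveries (attempt_delivery). The contract, read off the call sites in this hunk rather than from any spec in the source, is roughly:

    %% PredFun(FunAcc, State) -> boolean()
    %%     true when a message can be produced for the next ready consumer.
    %% DeliverFun(AckRequired, FunAcc, State) ->
    %%     {{Message, IsDelivered, AckTag}, FunAcc1, State1}
    %%     produces that message plus the updated accumulator and queue state.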
+deliver_from_queue_deliver(AckRequired, false, State) -> + {{Message, IsDelivered, AckTag, Remaining}, State1} = + fetch(AckRequired, State), + {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. + +confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> + {CMs, GTC1} = + lists:foldl( + fun(Guid, {CMs, GTC0}) -> + case dict:find(Guid, GTC0) of + {ok, {ChPid, MsgSeqNo}} -> + {[{ChPid, MsgSeqNo} | CMs], dict:erase(Guid, GTC0)}; + _ -> + {CMs, GTC0} + end + end, {[], GTC}, Guids), + case lists:usort(CMs) of + [{Ch, MsgSeqNo} | CMs1] -> + [rabbit_channel:confirm(ChPid, MsgSeqNos) || + {ChPid, MsgSeqNos} <- group_confirms_by_channel( + CMs1, [{Ch, [MsgSeqNo]}])]; + [] -> + ok + end, + State#q{guid_to_channel = GTC1}. -attempt_delivery(none, _ChPid, Message, State) -> - case deliver_immediately(Message, false, State) of - {offered, false, State1} -> - {true, State1}; - {offered, true, State1} -> - persist_message(none, qname(State), Message), - persist_delivery(qname(State), Message, false), +group_confirms_by_channel([], Acc) -> + Acc; +group_confirms_by_channel([{Ch, Msg1} | CMs], [{Ch, Msgs} | Acc]) -> + group_confirms_by_channel(CMs, [{Ch, [Msg1 | Msgs]} | Acc]); +group_confirms_by_channel([{Ch, Msg1} | CMs], Acc) -> + group_confirms_by_channel(CMs, [{Ch, [Msg1]} | Acc]). + +record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> + {no_confirm, State}; +record_confirm_message(#delivery{sender = ChPid, + msg_seq_no = MsgSeqNo, + message = #basic_message { + is_persistent = true, + guid = Guid}}, + State = + #q{guid_to_channel = GTC, + q = #amqqueue{durable = true}}) -> + {confirm, + State#q{guid_to_channel = dict:store(Guid, {ChPid, MsgSeqNo}, GTC)}}; +record_confirm_message(_Delivery, State) -> + {no_confirm, State}. + +run_message_queue(State) -> + Funs = {fun deliver_from_queue_pred/2, + fun deliver_from_queue_deliver/3}, + State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = + drop_expired_messages(State), + IsEmpty = BQ:is_empty(BQS), + {_IsEmpty1, State2} = deliver_msgs_to_consumers(Funs, IsEmpty, State1), + State2. + +attempt_delivery(#delivery{txn = none, + sender = ChPid, + message = Message, + msg_seq_no = MsgSeqNo}, + {NeedsConfirming, State = #q{backing_queue = BQ}}) -> + %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming + case {NeedsConfirming, MsgSeqNo} of + {_, undefined} -> ok; + {no_confirm, _} -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); + {confirm, _} -> ok + end, + PredFun = fun (IsEmpty, _State) -> not IsEmpty end, + DeliverFun = + fun (AckRequired, false, State1 = #q{backing_queue_state = BQS}) -> + %% we don't need an expiry here because messages are + %% not being enqueued, so we use an empty + %% message_properties. + {AckTag, BQS1} = + BQ:publish_delivered( + AckRequired, Message, + (?BASE_MESSAGE_PROPERTIES)#message_properties{ + needs_confirming = (NeedsConfirming =:= confirm)}, + BQS), + {{Message, false, AckTag}, true, + State1#q{backing_queue_state = BQS1}} + end, + {Delivered, State1} = + deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), + {Delivered, NeedsConfirming, State1}; +attempt_delivery(#delivery{txn = Txn, + sender = ChPid, + message = Message}, + {NeedsConfirming, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}}) -> + store_ch_record((ch_record(ChPid))#cr{txn = Txn}), + {true, + NeedsConfirming, + State#q{backing_queue_state = + BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS)}}. 
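A worked example of the confirm plumbing above, assuming two channel pids C1 and C2 and three confirmed guids whose guid_to_channel entries are {C1,1}, {C1,2} and {C2,7}:

    collected confirms:                  [{C1,1}, {C1,2}, {C2,7}]
    after lists:usort/1:                 [{C1,1}, {C1,2}, {C2,7}]
    after group_confirms_by_channel/2:   [{C2,[7]}, {C1,[2,1]}]
    resulting calls:                     rabbit_channel:confirm(C2, [7])
                                         rabbit_channel:confirm(C1, [2,1])

so each channel receives a single confirm call carrying all of its message sequence numbers, in no particular order.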
+ +deliver_or_enqueue(Delivery, State) -> + case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of + {true, _, State1} -> {true, State1}; - {not_offered, State1} -> - {false, State1} - end; -attempt_delivery(Txn, ChPid, Message, State) -> - persist_message(Txn, qname(State), Message), - record_pending_message(Txn, ChPid, Message), - {true, State}. - -deliver_or_enqueue(Txn, ChPid, Message, State) -> - case attempt_delivery(Txn, ChPid, Message, State) of - {true, NewState} -> - {true, NewState}; - {false, NewState} -> - persist_message(Txn, qname(State), Message), - NewMB = queue:in({Message, false}, NewState#q.message_buffer), - {false, NewState#q{message_buffer = NewMB}} - end. - -deliver_or_enqueue_n(Messages, State = #q{message_buffer = MessageBuffer}) -> - run_message_queue(queue:join(MessageBuffer, queue:from_list(Messages)), - State). + {false, NeedsConfirming, State1 = #q{backing_queue = BQ, + backing_queue_state = BQS}} -> + #delivery{message = Message} = Delivery, + BQS1 = BQ:publish(Message, + (message_properties(State)) #message_properties{ + needs_confirming = + (NeedsConfirming =:= confirm)}, + BQS), + {false, ensure_ttl_timer(State1#q{backing_queue_state = BQS1})} + end. + +requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> + maybe_run_queue_via_backing_queue( + fun (BQS) -> + {[], BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS)} + end, State). + +fetch(AckRequired, State = #q{backing_queue_state = BQS, + backing_queue = BQ}) -> + {Result, BQS1} = BQ:fetch(AckRequired, BQS), + {Result, State#q{backing_queue_state = BQS1}}. add_consumer(ChPid, Consumer, Queue) -> queue:in({ChPid, Consumer}, Queue). remove_consumer(ChPid, ConsumerTag, Queue) -> - %% TODO: replace this with queue:filter/2 once we move to R12 - queue:from_list(lists:filter( - fun ({CP, #consumer{tag = CT}}) -> - (CP /= ChPid) or (CT /= ConsumerTag) - end, queue:to_list(Queue))). + queue:filter(fun ({CP, #consumer{tag = CT}}) -> + (CP /= ChPid) or (CT /= ConsumerTag) + end, Queue). remove_consumers(ChPid, Queue) -> - %% TODO: replace this with queue:filter/2 once we move to R12 - queue:from_list(lists:filter(fun ({CP, _}) -> CP /= ChPid end, - queue:to_list(Queue))). + {Kept, Removed} = split_by_channel(ChPid, Queue), + [emit_consumer_deleted(Ch, CTag) || + {Ch, #consumer{tag = CTag}} <- queue:to_list(Removed)], + Kept. move_consumers(ChPid, From, To) -> + {Kept, Removed} = split_by_channel(ChPid, From), + {Kept, queue:join(To, Removed)}. + +split_by_channel(ChPid, Queue) -> {Kept, Removed} = lists:partition(fun ({CP, _}) -> CP /= ChPid end, - queue:to_list(From)), - {queue:from_list(Kept), queue:join(To, queue:from_list(Removed))}. + queue:to_list(Queue)), + {queue:from_list(Kept), queue:from_list(Removed)}. 
possibly_unblock(State, ChPid, Update) -> case lookup_ch(ChPid) of @@ -301,7 +550,7 @@ State; C -> NewC = Update(C), - store_ch_record(NewC), + maybe_store_ch_record(NewC), case ch_record_state_transition(C, NewC) of ok -> State; unblock -> {NewBlockedConsumers, NewActiveConsumers} = @@ -322,10 +571,8 @@ case lookup_ch(DownPid) of not_found -> {ok, State}; - #cr{monitor_ref = MonitorRef, ch_pid = ChPid, txn = Txn, - unacked_messages = UAM} -> - erlang:demonitor(MonitorRef), - erase({ch, ChPid}), + C = #cr{ch_pid = ChPid, txn = Txn, acktags = ChAckTags} -> + ok = erase_ch_record(C), State1 = State#q{ exclusive_consumer = case Holder of {ChPid, _} -> none; @@ -337,15 +584,13 @@ ChPid, State#q.blocked_consumers)}, case should_auto_delete(State1) of true -> {stop, State1}; - false -> case Txn of - none -> ok; - _ -> ok = rollback_work(Txn, qname(State1)), - erase_tx(Txn) - end, - {ok, deliver_or_enqueue_n( - [{Message, true} || - {_MsgId, Message} <- dict:to_list(UAM)], - State1)} + false -> State2 = case Txn of + none -> State1; + _ -> rollback_transaction(Txn, C, + State1) + end, + {ok, requeue_and_run(sets:to_list(ChAckTags), + ensure_expiry_timer(State2))} end end. @@ -354,10 +599,6 @@ cancel_holder(_ChPid, _ConsumerTag, Holder) -> Holder. -check_queue_owner(none, _) -> ok; -check_queue_owner({ReaderPid, _}, ReaderPid) -> ok; -check_queue_owner({_, _}, _) -> mismatch. - check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) -> in_use; check_exclusive_access(none, false, _State) -> @@ -376,134 +617,74 @@ qname(#q{q = #amqqueue{name = QName}}) -> QName. -persist_message(_Txn, _QName, #basic_message{persistent_key = none}) -> - ok; -persist_message(Txn, QName, Message) -> - M = Message#basic_message{ - %% don't persist any recoverable decoded properties, rebuild from properties_bin on restore - content = rabbit_binary_parser:clear_decoded_content( - Message#basic_message.content)}, - persist_work(Txn, QName, - [{publish, M, {QName, M#basic_message.persistent_key}}]). +backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> + maybe_run_queue_via_backing_queue( + fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). + +maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> + {Guids, BQS1} = Fun(BQS), + run_message_queue( + confirm_messages(Guids, State#q{backing_queue_state = BQS1})). + +commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, + State = #q{backing_queue = BQ, + backing_queue_state = BQS, + ttl = TTL}) -> + {AckTags, BQS1} = BQ:tx_commit( + Txn, fun () -> gen_server2:reply(From, ok) end, + reset_msg_expiry_fun(TTL), BQS), + ChAckTags1 = subtract_acks(ChAckTags, AckTags), + maybe_store_ch_record(C#cr{acktags = ChAckTags1, txn = none}), + State#q{backing_queue_state = BQS1}. + +rollback_transaction(Txn, C, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {_AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), + %% Iff we removed acktags from the channel record on ack+txn then + %% we would add them back in here. + maybe_store_ch_record(C#cr{txn = none}), + State#q{backing_queue_state = BQS1}. + +subtract_acks(A, B) when is_list(B) -> + lists:foldl(fun sets:del_element/2, A, B). + +reset_msg_expiry_fun(TTL) -> + fun(MsgProps) -> + MsgProps#message_properties{expiry = calculate_msg_expiry(TTL)} + end. 
-persist_delivery(_QName, _Message, - true) -> - ok; -persist_delivery(_QName, #basic_message{persistent_key = none}, - _IsDelivered) -> - ok; -persist_delivery(QName, #basic_message{persistent_key = PKey}, - _IsDelivered) -> - persist_work(none, QName, [{deliver, {QName, PKey}}]). - -persist_acks(Txn, QName, Messages) -> - persist_work(Txn, QName, - [{ack, {QName, PKey}} || - #basic_message{persistent_key = PKey} <- Messages, - PKey =/= none]). +message_properties(#q{ttl=TTL}) -> + #message_properties{expiry = calculate_msg_expiry(TTL)}. -persist_auto_ack(_QName, #basic_message{persistent_key = none}) -> - ok; -persist_auto_ack(QName, #basic_message{persistent_key = PKey}) -> - %% auto-acks are always non-transactional - rabbit_persister:dirty_work([{ack, {QName, PKey}}]). +calculate_msg_expiry(undefined) -> undefined; +calculate_msg_expiry(TTL) -> now_millis() + (TTL * 1000). -persist_work(_Txn,_QName, []) -> - ok; -persist_work(none, _QName, WorkList) -> - rabbit_persister:dirty_work(WorkList); -persist_work(Txn, QName, WorkList) -> - mark_tx_persistent(Txn), - rabbit_persister:extend_transaction({Txn, QName}, WorkList). - -commit_work(Txn, QName) -> - do_if_persistent(fun rabbit_persister:commit_transaction/1, - Txn, QName). - -rollback_work(Txn, QName) -> - do_if_persistent(fun rabbit_persister:rollback_transaction/1, - Txn, QName). - -%% optimisation: don't do unnecessary work -%% it would be nice if this was handled by the persister -do_if_persistent(F, Txn, QName) -> - case is_tx_persistent(Txn) of - false -> ok; - true -> ok = F({Txn, QName}) - end. - -lookup_tx(Txn) -> - case get({txn, Txn}) of - undefined -> #tx{ch_pid = none, - is_persistent = false, - pending_messages = [], - pending_acks = []}; - V -> V - end. - -store_tx(Txn, Tx) -> - put({txn, Txn}, Tx). - -erase_tx(Txn) -> - erase({txn, Txn}). - -all_tx_record() -> - [T || {{txn, _}, T} <- get()]. - -all_tx() -> - [Txn || {{txn, Txn}, _} <- get()]. - -mark_tx_persistent(Txn) -> - Tx = lookup_tx(Txn), - store_tx(Txn, Tx#tx{is_persistent = true}). - -is_tx_persistent(Txn) -> - #tx{is_persistent = Res} = lookup_tx(Txn), - Res. - -record_pending_message(Txn, ChPid, Message) -> - Tx = #tx{pending_messages = Pending} = lookup_tx(Txn), - record_current_channel_tx(ChPid, Txn), - store_tx(Txn, Tx#tx{pending_messages = [{Message, false} | Pending], - ch_pid = ChPid}). - -record_pending_acks(Txn, ChPid, MsgIds) -> - Tx = #tx{pending_acks = Pending} = lookup_tx(Txn), - record_current_channel_tx(ChPid, Txn), - store_tx(Txn, Tx#tx{pending_acks = [MsgIds | Pending], - ch_pid = ChPid}). - -process_pending(Txn, State) -> - #tx{ch_pid = ChPid, - pending_messages = PendingMessages, - pending_acks = PendingAcks} = lookup_tx(Txn), - case lookup_ch(ChPid) of - not_found -> ok; - C = #cr{unacked_messages = UAM} -> - {_Acked, Remaining} = - collect_messages(lists:append(PendingAcks), UAM), - store_ch_record(C#cr{unacked_messages = Remaining}) - end, - deliver_or_enqueue_n(lists:reverse(PendingMessages), State). +drop_expired_messages(State = #q{ttl = undefined}) -> + State; +drop_expired_messages(State = #q{backing_queue_state = BQS, + backing_queue = BQ}) -> + Now = now_millis(), + BQS1 = BQ:dropwhile( + fun (#message_properties{expiry = Expiry}) -> + Now > Expiry + end, BQS), + ensure_ttl_timer(State#q{backing_queue_state = BQS1}). 
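A note on units in the expiry code above: now_millis/0 is timer:now_diff(now(), {0,0,0}), and timer:now_diff/2 returns microseconds, so despite its name the function yields microseconds since the Unix epoch. calculate_msg_expiry/1 therefore multiplies the x-message-ttl value (milliseconds) by 1000 to stay in the same unit, and drop_expired_messages/1 compares the two microsecond values directly; for instance a 60000 ms TTL on a message enqueued at time T microseconds gives expiry = T + 60000000.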
+ +ensure_ttl_timer(State = #q{backing_queue = BQ, + backing_queue_state = BQS, + ttl = TTL, + ttl_timer_ref = undefined}) + when TTL =/= undefined -> + case BQ:is_empty(BQS) of + true -> State; + false -> TRef = timer:apply_after(TTL, rabbit_amqqueue, drop_expired, + [self()]), + State#q{ttl_timer_ref = TRef} + end; +ensure_ttl_timer(State) -> + State. -collect_messages(MsgIds, UAM) -> - lists:mapfoldl( - fun (MsgId, D) -> {dict:fetch(MsgId, D), dict:erase(MsgId, D)} end, - UAM, MsgIds). - -purge_message_buffer(QName, MessageBuffer) -> - Messages = - [[Message || {Message, _IsDelivered} <- - queue:to_list(MessageBuffer)] | - lists:map( - fun (#cr{unacked_messages = UAM}) -> - [Message || {_MsgId, Message} <- dict:to_list(UAM)] - end, - all_ch_record())], - %% the simplest, though certainly not the most obvious or - %% efficient, way to purge messages from the persister is to - %% artifically ack them. - persist_acks(none, QName, lists:append(Messages)). +now_millis() -> timer:now_diff(now(), {0,0,0}). infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. @@ -513,10 +694,10 @@ i(arguments, #q{q = #amqqueue{arguments = Arguments}}) -> Arguments; i(pid, _) -> self(); -i(owner_pid, #q{owner = none}) -> +i(owner_pid, #q{q = #amqqueue{exclusive_owner = none}}) -> ''; -i(owner_pid, #q{owner = {ReaderPid, _MonitorRef}}) -> - ReaderPid; +i(owner_pid, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) -> + ExclusiveOwner; i(exclusive_consumer_pid, #q{exclusive_consumer = none}) -> ''; i(exclusive_consumer_pid, #q{exclusive_consumer = {ChPid, _ConsumerTag}}) -> @@ -525,33 +706,106 @@ ''; i(exclusive_consumer_tag, #q{exclusive_consumer = {_ChPid, ConsumerTag}}) -> ConsumerTag; -i(messages_ready, #q{message_buffer = MessageBuffer}) -> - queue:len(MessageBuffer); +i(messages_ready, #q{backing_queue_state = BQS, backing_queue = BQ}) -> + BQ:len(BQS); i(messages_unacknowledged, _) -> - lists:sum([dict:size(UAM) || - #cr{unacked_messages = UAM} <- all_ch_record()]); -i(messages_uncommitted, _) -> - lists:sum([length(Pending) || - #tx{pending_messages = Pending} <- all_tx_record()]); + lists:sum([sets:size(C#cr.acktags) || C <- all_ch_record()]); i(messages, State) -> lists:sum([i(Item, State) || Item <- [messages_ready, - messages_unacknowledged, - messages_uncommitted]]); -i(acks_uncommitted, _) -> - lists:sum([length(Pending) || - #tx{pending_acks = Pending} <- all_tx_record()]); + messages_unacknowledged]]); i(consumers, State) -> queue:len(State#q.active_consumers) + queue:len(State#q.blocked_consumers); -i(transactions, _) -> - length(all_tx_record()); i(memory, _) -> {memory, M} = process_info(self(), memory), M; +i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) -> + BQ:status(BQS); i(Item, _) -> throw({bad_argument, Item}). +consumers(#q{active_consumers = ActiveConsumers, + blocked_consumers = BlockedConsumers}) -> + rabbit_misc:queue_fold( + fun ({ChPid, #consumer{tag = ConsumerTag, + ack_required = AckRequired}}, Acc) -> + [{ChPid, ConsumerTag, AckRequired} | Acc] + end, [], queue:join(ActiveConsumers, BlockedConsumers)). + +emit_stats(State) -> + emit_stats(State, []). + +emit_stats(State, Extra) -> + rabbit_event:notify(queue_stats, Extra ++ infos(?STATISTICS_KEYS, State)). + +emit_consumer_created(ChPid, ConsumerTag, Exclusive, AckRequired) -> + rabbit_event:notify(consumer_created, + [{consumer_tag, ConsumerTag}, + {exclusive, Exclusive}, + {ack_required, AckRequired}, + {channel, ChPid}, + {queue, self()}]). 
+ +emit_consumer_deleted(ChPid, ConsumerTag) -> + rabbit_event:notify(consumer_deleted, + [{consumer_tag, ConsumerTag}, + {channel, ChPid}, + {queue, self()}]). + %--------------------------------------------------------------------------- +prioritise_call(Msg, _From, _State) -> + case Msg of + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {maybe_run_queue_via_backing_queue, _Fun} -> 6; + _ -> 0 + end. + +prioritise_cast(Msg, _State) -> + case Msg of + update_ram_duration -> 8; + delete_immediately -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + {ack, _Txn, _MsgIds, _ChPid} -> 7; + {reject, _MsgIds, _Requeue, _ChPid} -> 7; + {notify_sent, _ChPid} -> 7; + {unblock, _ChPid} -> 7; + {maybe_run_queue_via_backing_queue, _Fun} -> 6; + sync_timeout -> 6; + _ -> 0 + end. + +prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, + #q{q = #amqqueue{exclusive_owner = DownPid}}) -> 8; +prioritise_info(_Msg, _State) -> 0. + +handle_call({init, Recover}, From, + State = #q{q = #amqqueue{exclusive_owner = none}}) -> + declare(Recover, From, State); + +handle_call({init, Recover}, From, + State = #q{q = #amqqueue{exclusive_owner = Owner}}) -> + case rpc:call(node(Owner), erlang, is_process_alive, [Owner]) of + true -> erlang:monitor(process, Owner), + declare(Recover, From, State); + _ -> #q{q = #amqqueue{name = QName, durable = IsDurable}, + backing_queue = BQ, backing_queue_state = undefined} = State, + gen_server2:reply(From, not_found), + case Recover of + true -> ok; + _ -> rabbit_log:warning( + "Queue ~p exclusive owner went away~n", [QName]) + end, + BQS = BQ:init(QName, IsDurable, Recover), + %% Rely on terminate to delete the queue. + {stop, normal, State#q{backing_queue_state = BQS}} + end; + handle_call(info, _From, State) -> reply(infos(?INFO_KEYS, State), State); @@ -561,16 +815,11 @@ catch Error -> reply({error, Error}, State) end; -handle_call(consumers, _From, - State = #q{active_consumers = ActiveConsumers, - blocked_consumers = BlockedConsumers}) -> - reply(rabbit_misc:queue_fold( - fun ({ChPid, #consumer{tag = ConsumerTag, - ack_required = AckRequired}}, Acc) -> - [{ChPid, ConsumerTag, AckRequired} | Acc] - end, [], queue:join(ActiveConsumers, BlockedConsumers)), State); +handle_call(consumers, _From, State) -> + reply(consumers(State), State); -handle_call({deliver_immediately, Txn, Message, ChPid}, _From, State) -> +handle_call({deliver_immediately, Delivery}, + _From, State) -> %% Synchronous, "immediate" delivery mode %% %% FIXME: Is this correct semantics? @@ -584,22 +833,23 @@ %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Delivered, NewState} = attempt_delivery(Txn, ChPid, Message, State), - reply(Delivered, NewState); - -handle_call({deliver, Txn, Message, ChPid}, _From, State) -> - %% Synchronous, "mandatory" delivery mode - {Delivered, NewState} = deliver_or_enqueue(Txn, ChPid, Message, State), - reply(Delivered, NewState); - -handle_call({commit, Txn}, From, State) -> - ok = commit_work(Txn, qname(State)), - %% optimisation: we reply straight away so the sender can continue - gen_server2:reply(From, ok), - NewState = process_pending(Txn, State), - erase_tx(Txn), + {Delivered, _NeedsConfirming, State1} = + attempt_delivery(Delivery, record_confirm_message(Delivery, State)), + reply(Delivered, State1); + +handle_call({deliver, Delivery}, From, State) -> + %% Synchronous, "mandatory" delivery mode. 
Reply asap. + gen_server2:reply(From, true), + {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), noreply(NewState); +handle_call({commit, Txn, ChPid}, From, State) -> + case lookup_ch(ChPid) of + not_found -> reply(ok, State); + C -> noreply(run_message_queue( + commit_transaction(Txn, From, C, State))) + end; + handle_call({notify_down, ChPid}, _From, State) -> %% we want to do this synchronously, so that auto_deleted queues %% are no longer visible by the time we send a response to the @@ -612,74 +862,67 @@ end; handle_call({basic_get, ChPid, NoAck}, _From, - State = #q{q = #amqqueue{name = QName}, - next_msg_id = NextId, - message_buffer = MessageBuffer}) -> - case queue:out(MessageBuffer) of - {{value, {Message, IsDelivered}}, BufferTail} -> - AckRequired = not(NoAck), - case AckRequired of - true -> - persist_delivery(QName, Message, IsDelivered), - C = #cr{unacked_messages = UAM} = ch_record(ChPid), - NewUAM = dict:store(NextId, Message, UAM), - store_ch_record(C#cr{unacked_messages = NewUAM}); - false -> - persist_auto_ack(QName, Message) - end, - Msg = {QName, self(), NextId, IsDelivered, Message}, - reply({ok, queue:len(BufferTail), Msg}, - State#q{message_buffer = BufferTail, - next_msg_id = NextId + 1}); - {empty, _} -> - reply(empty, State) + State = #q{q = #amqqueue{name = QName}}) -> + AckRequired = not NoAck, + State1 = ensure_expiry_timer(State), + case fetch(AckRequired, drop_expired_messages(State1)) of + {empty, State2} -> + reply(empty, State2); + {{Message, IsDelivered, AckTag, Remaining}, State2} -> + State3 = + case AckRequired of + true -> C = #cr{acktags = ChAckTags} = ch_record(ChPid), + true = maybe_store_ch_record( + C#cr{acktags = + sets:add_element(AckTag, + ChAckTags)}), + State2; + false -> State2 + end, + Msg = {QName, self(), AckTag, IsDelivered, Message}, + reply({ok, Remaining, Msg}, State3) end; -handle_call({basic_consume, NoAck, ReaderPid, ChPid, LimiterPid, +handle_call({basic_consume, NoAck, ChPid, LimiterPid, ConsumerTag, ExclusiveConsume, OkMsg}, - _From, State = #q{owner = Owner, - exclusive_consumer = ExistingHolder}) -> - case check_queue_owner(Owner, ReaderPid) of - mismatch -> - reply({error, queue_owned_by_another_connection}, State); + _From, State = #q{exclusive_consumer = ExistingHolder}) -> + case check_exclusive_access(ExistingHolder, ExclusiveConsume, + State) of + in_use -> + reply({error, exclusive_consume_unavailable}, State); ok -> - case check_exclusive_access(ExistingHolder, ExclusiveConsume, - State) of - in_use -> - reply({error, exclusive_consume_unavailable}, State); - ok -> - C = #cr{consumer_count = ConsumerCount} = ch_record(ChPid), - Consumer = #consumer{tag = ConsumerTag, - ack_required = not(NoAck)}, - store_ch_record(C#cr{consumer_count = ConsumerCount +1, - limiter_pid = LimiterPid}), - case ConsumerCount of - 0 -> ok = rabbit_limiter:register(LimiterPid, self()); - _ -> ok - end, - ExclusiveConsumer = case ExclusiveConsume of - true -> {ChPid, ConsumerTag}; - false -> ExistingHolder - end, - State1 = State#q{has_had_consumers = true, - exclusive_consumer = ExclusiveConsumer}, - ok = maybe_send_reply(ChPid, OkMsg), - State2 = - case is_ch_blocked(C) of - true -> State1#q{ - blocked_consumers = - add_consumer( - ChPid, Consumer, - State1#q.blocked_consumers)}; - false -> run_message_queue( - State1#q{ - active_consumers = - add_consumer( - ChPid, Consumer, - State1#q.active_consumers)}) - end, - reply(ok, State2) - end + C = #cr{consumer_count = ConsumerCount} = ch_record(ChPid), + Consumer = #consumer{tag 
= ConsumerTag, + ack_required = not NoAck}, + true = maybe_store_ch_record(C#cr{consumer_count = ConsumerCount +1, + limiter_pid = LimiterPid}), + ok = case ConsumerCount of + 0 -> rabbit_limiter:register(LimiterPid, self()); + _ -> ok + end, + ExclusiveConsumer = if ExclusiveConsume -> {ChPid, ConsumerTag}; + true -> ExistingHolder + end, + State1 = State#q{has_had_consumers = true, + exclusive_consumer = ExclusiveConsumer}, + ok = maybe_send_reply(ChPid, OkMsg), + State2 = + case is_ch_blocked(C) of + true -> State1#q{ + blocked_consumers = + add_consumer( + ChPid, Consumer, + State1#q.blocked_consumers)}; + false -> run_message_queue( + State1#q{ + active_consumers = + add_consumer( + ChPid, Consumer, + State1#q.active_consumers)}) + end, + emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, + not NoAck), + reply(ok, State2) end; handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From, @@ -688,12 +931,16 @@ not_found -> ok = maybe_send_reply(ChPid, OkMsg), reply(ok, State); - C = #cr{consumer_count = ConsumerCount, limiter_pid = LimiterPid} -> - store_ch_record(C#cr{consumer_count = ConsumerCount - 1}), - case ConsumerCount of - 1 -> ok = rabbit_limiter:unregister(LimiterPid, self()); - _ -> ok - end, + C = #cr{consumer_count = ConsumerCount, + limiter_pid = LimiterPid} -> + C1 = C#cr{consumer_count = ConsumerCount -1}, + maybe_store_ch_record( + case ConsumerCount of + 1 -> ok = rabbit_limiter:unregister(LimiterPid, self()), + C1#cr{limiter_pid = undefined}; + _ -> C1 + end), + emit_consumer_deleted(ChPid, ConsumerTag), ok = maybe_send_reply(ChPid, OkMsg), NewState = State#q{exclusive_consumer = cancel_holder(ChPid, @@ -706,20 +953,20 @@ ChPid, ConsumerTag, State#q.blocked_consumers)}, case should_auto_delete(NewState) of - false -> reply(ok, NewState); + false -> reply(ok, ensure_expiry_timer(NewState)); true -> {stop, normal, ok, NewState} end end; -handle_call(stat, _From, State = #q{q = #amqqueue{name = Name}, - message_buffer = MessageBuffer, - active_consumers = ActiveConsumers}) -> - Length = queue:len(MessageBuffer), - reply({ok, Name, Length, queue:len(ActiveConsumers)}, State); +handle_call(stat, _From, State) -> + State1 = #q{backing_queue = BQ, backing_queue_state = BQS, + active_consumers = ActiveConsumers} = + drop_expired_messages(ensure_expiry_timer(State)), + reply({ok, BQ:len(BQS), queue:len(ActiveConsumers)}, State1); handle_call({delete, IfUnused, IfEmpty}, _From, - State = #q{message_buffer = MessageBuffer}) -> - IsEmpty = queue:is_empty(MessageBuffer), + State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> + IsEmpty = BQ:is_empty(BQS), IsUnused = is_unused(State), if IfEmpty and not(IsEmpty) -> @@ -727,79 +974,85 @@ IfUnused and not(IsUnused) -> reply({error, in_use}, State); true -> - {stop, normal, {ok, queue:len(MessageBuffer)}, State} + {stop, normal, {ok, BQ:len(BQS)}, State} end; -handle_call(purge, _From, State = #q{message_buffer = MessageBuffer}) -> - ok = purge_message_buffer(qname(State), MessageBuffer), - reply({ok, queue:len(MessageBuffer)}, - State#q{message_buffer = queue:new()}); - -handle_call({claim_queue, ReaderPid}, _From, - State = #q{owner = Owner, exclusive_consumer = Holder}) -> - case Owner of - none -> - case check_exclusive_access(Holder, true, State) of - in_use -> - %% FIXME: Is this really the right answer? What if - %% an active consumer's reader is actually the - %% claiming pid? Should that be allowed? 
In order - %% to check, we'd need to hold not just the ch - %% pid for each consumer, but also its reader - %% pid... - reply(locked, State); - ok -> - MonitorRef = erlang:monitor(process, ReaderPid), - reply(ok, State#q{owner = {ReaderPid, MonitorRef}}) - end; - {ReaderPid, _MonitorRef} -> - reply(ok, State); - _ -> - reply(locked, State) - end. +handle_call(purge, _From, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {Count, BQS1} = BQ:purge(BQS), + reply({ok, Count}, State#q{backing_queue_state = BQS1}); -handle_cast({deliver, Txn, Message, ChPid}, State) -> +handle_call({requeue, AckTags, ChPid}, From, State) -> + gen_server2:reply(From, ok), + case lookup_ch(ChPid) of + not_found -> + noreply(State); + C = #cr{acktags = ChAckTags} -> + ChAckTags1 = subtract_acks(ChAckTags, AckTags), + maybe_store_ch_record(C#cr{acktags = ChAckTags1}), + noreply(requeue_and_run(AckTags, State)) + end; + +handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> + reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). + + +handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> + noreply(maybe_run_queue_via_backing_queue(Fun, State)); + +handle_cast(sync_timeout, State) -> + noreply(backing_queue_idle_timeout(State#q{sync_timer_ref = undefined})); + +handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. - {_Delivered, NewState} = deliver_or_enqueue(Txn, ChPid, Message, State), + {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), noreply(NewState); -handle_cast({ack, Txn, MsgIds, ChPid}, State) -> +handle_cast({ack, Txn, AckTags, ChPid}, + State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> case lookup_ch(ChPid) of not_found -> noreply(State); - C = #cr{unacked_messages = UAM} -> - {Acked, Remaining} = collect_messages(MsgIds, UAM), - persist_acks(Txn, qname(State), Acked), - case Txn of - none -> - store_ch_record(C#cr{unacked_messages = Remaining}); - _ -> - record_pending_acks(Txn, ChPid, MsgIds) - end, - noreply(State) + C = #cr{acktags = ChAckTags} -> + {C1, State1} = + case Txn of + none -> ChAckTags1 = subtract_acks(ChAckTags, AckTags), + NewC = C#cr{acktags = ChAckTags1}, + BQS1 = BQ:ack(AckTags, BQS), + {NewC, State#q{backing_queue_state = BQS1}}; + _ -> BQS1 = BQ:tx_ack(Txn, AckTags, BQS), + {C#cr{txn = Txn}, + State#q{backing_queue_state = BQS1}} + end, + maybe_store_ch_record(C1), + noreply(State1) end; -handle_cast({rollback, Txn}, State) -> - ok = rollback_work(Txn, qname(State)), - erase_tx(Txn), - noreply(State); - -handle_cast({redeliver, Messages}, State) -> - noreply(deliver_or_enqueue_n(Messages, State)); - -handle_cast({requeue, MsgIds, ChPid}, State) -> +handle_cast({reject, AckTags, Requeue, ChPid}, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> case lookup_ch(ChPid) of not_found -> - rabbit_log:warning("Ignoring requeue from unknown ch: ~p~n", - [ChPid]), noreply(State); - C = #cr{unacked_messages = UAM} -> - {Messages, NewUAM} = collect_messages(MsgIds, UAM), - store_ch_record(C#cr{unacked_messages = NewUAM}), - noreply(deliver_or_enqueue_n( - [{Message, true} || Message <- Messages], State)) + C = #cr{acktags = ChAckTags} -> + ChAckTags1 = subtract_acks(ChAckTags, AckTags), + maybe_store_ch_record(C#cr{acktags = ChAckTags1}), + noreply(case Requeue of + true -> requeue_and_run(AckTags, State); + false -> BQS1 = BQ:ack(AckTags, BQS), + State#q{backing_queue_state = BQS1} + end) end; +handle_cast({rollback, Txn, ChPid}, State) -> + noreply(case 
lookup_ch(ChPid) of + not_found -> State; + C -> rollback_transaction(Txn, C, State) + end); + +handle_cast(delete_immediately, State) -> + {stop, normal, State}; + handle_cast({unblock, ChPid}, State) -> noreply( possibly_unblock(State, ChPid, @@ -826,27 +1079,86 @@ end, NewLimited = Limited andalso LimiterPid =/= undefined, C#cr{limiter_pid = LimiterPid, is_limit_active = NewLimited} - end)). + end)); + +handle_cast({flush, ChPid}, State) -> + ok = rabbit_channel:flushed(ChPid, self()), + noreply(State); + +handle_cast(update_ram_duration, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + noreply(State#q{rate_timer_ref = just_measured, + backing_queue_state = BQS2}); + +handle_cast({set_ram_duration_target, Duration}, + State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> + BQS1 = BQ:set_ram_duration_target(Duration, BQS), + noreply(State#q{backing_queue_state = BQS1}); -handle_info({'DOWN', MonitorRef, process, DownPid, _Reason}, - State = #q{owner = {DownPid, MonitorRef}}) -> - %% We know here that there are no consumers on this queue that are - %% owned by other pids than the one that just went down, so since - %% exclusive in some sense implies autodelete, we delete the queue - %% here. The other way of implementing the "exclusive implies - %% autodelete" feature is to actually set autodelete when an - %% exclusive declaration is seen, but this has the problem that - %% the python tests rely on the queue not going away after a - %% basic.cancel when the queue was declared exclusive and - %% nonautodelete. - NewState = State#q{owner = none}, - {stop, normal, NewState}; +handle_cast({set_maximum_since_use, Age}, State) -> + ok = file_handle_cache:set_maximum_since_use(Age), + noreply(State); + +handle_cast(maybe_expire, State) -> + case is_unused(State) of + true -> ?LOGDEBUG("Queue lease expired for ~p~n", [State#q.q]), + {stop, normal, State}; + false -> noreply(ensure_expiry_timer(State)) + end; + +handle_cast(drop_expired, State) -> + noreply(drop_expired_messages(State#q{ttl_timer_ref = undefined})); + +handle_cast(emit_stats, State = #q{stats_timer = StatsTimer}) -> + %% Do not invoke noreply as it would see no timer and create a new one. + emit_stats(State), + State1 = State#q{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}, + assert_invariant(State1), + {noreply, State1, hibernate}. + +handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, + State = #q{q = #amqqueue{exclusive_owner = DownPid}}) -> + %% Exclusively owned queues must disappear with their owner. In + %% the case of clean shutdown we delete the queue synchronously in + %% the reader - although not required by the spec this seems to + %% match what people expect (see bug 21824). However we need this + %% monitor-and-async- delete in case the connection goes away + %% unexpectedly. + {stop, normal, State}; handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) -> case handle_ch_down(DownPid, State) of {ok, NewState} -> noreply(NewState); {stop, NewState} -> {stop, normal, NewState} end; +handle_info(timeout, State) -> + noreply(backing_queue_idle_timeout(State)); + +handle_info({'EXIT', _Pid, Reason}, State) -> + {stop, Reason, State}; + handle_info(Info, State) -> ?LOGDEBUG("Info in queue: ~p~n", [Info]), {stop, {unhandled_info, Info}, State}. 
+ +handle_pre_hibernate(State = #q{backing_queue_state = undefined}) -> + {hibernate, State}; +handle_pre_hibernate(State = #q{backing_queue = BQ, + backing_queue_state = BQS, + stats_timer = StatsTimer}) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + BQS3 = BQ:handle_pre_hibernate(BQS2), + rabbit_event:if_enabled(StatsTimer, + fun () -> + emit_stats(State, [{idle_since, now()}]) + end), + State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), + backing_queue_state = BQS3}, + {hibernate, stop_rate_timer(State1)}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_amqqueue_sup.erl rabbitmq-server-2.3.1/src/rabbit_amqqueue_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_amqqueue_sup.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_amqqueue_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,48 +1,38 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_amqqueue_sup). --behaviour(supervisor). +-behaviour(supervisor2). --export([start_link/0]). +-export([start_link/0, start_child/1]). -export([init/1]). +-include("rabbit.hrl"). + -define(SERVER, ?MODULE). start_link() -> - supervisor:start_link({local, ?SERVER}, ?MODULE, []). + supervisor2:start_link({local, ?SERVER}, ?MODULE, []). + +start_child(Args) -> + supervisor2:start_child(?SERVER, Args). 
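start_child/1 above is the entry point through which individual queue processes are spawned under this supervisor. A hypothetical call site (the real one lives in rabbit_amqqueue.erl and is not part of this hunk), passing the #amqqueue{} record that rabbit_amqqueue_process:start_link/1 expects:

    %% Sketch: Q is an #amqqueue{} record for the queue being started
    {ok, QPid} = rabbit_amqqueue_sup:start_child([Q]),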
init([]) -> - {ok, {{simple_one_for_one, 10, 10}, + {ok, {{simple_one_for_one_terminate, 10, 10}, [{rabbit_amqqueue, {rabbit_amqqueue_process, start_link, []}, - temporary, brutal_kill, worker, [rabbit_amqqueue_process]}]}}. + temporary, ?MAX_WAIT, worker, [rabbit_amqqueue_process]}]}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_backend.erl rabbitmq-server-2.3.1/src/rabbit_auth_backend.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_backend.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_backend.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,61 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_backend). + +-export([behaviour_info/1]). + +behaviour_info(callbacks) -> + [ + %% A description proplist as with auth mechanisms, + %% exchanges. Currently unused. + {description, 0}, + + %% Check a user can log in, given a username and a proplist of + %% authentication information (e.g. [{password, Password}]). + %% + %% Possible responses: + %% {ok, User} + %% Authentication succeeded, and here's the user record. + %% {error, Error} + %% Something went wrong. Log and die. + %% {refused, Msg, Args} + %% Client failed authentication. Log and die. + {check_user_login, 2}, + + %% Given #user, vhost path and permission, can a user access a vhost? + %% Permission is read - learn of the existence of (only relevant for + %% management plugin) + %% or write - log in + %% + %% Possible responses: + %% true + %% false + %% {error, Error} + %% Something went wrong. Log and die. + {check_vhost_access, 3}, + + %% Given #user, resource and permission, can a user access a resource? + %% + %% Possible responses: + %% true + %% false + %% {error, Error} + %% Something went wrong. Log and die. + {check_resource_access, 3} + ]; +behaviour_info(_Other) -> + undefined. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_backend_internal.erl rabbitmq-server-2.3.1/src/rabbit_auth_backend_internal.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_backend_internal.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_backend_internal.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,332 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_backend_internal). +-include("rabbit.hrl"). 
+ +-behaviour(rabbit_auth_backend). + +-export([description/0]). +-export([check_user_login/2, check_vhost_access/3, check_resource_access/3]). + +-export([add_user/2, delete_user/1, change_password/2, set_admin/1, + clear_admin/1, list_users/0, lookup_user/1, clear_password/1]). +-export([make_salt/0, check_password/2, change_password_hash/2, + hash_password/1]). +-export([set_permissions/5, clear_permissions/2, + list_permissions/0, list_vhost_permissions/1, list_user_permissions/1, + list_user_vhost_permissions/2]). + +-include("rabbit_auth_backend_spec.hrl"). + +-ifdef(use_specs). + +-type(regexp() :: binary()). + +-spec(add_user/2 :: (rabbit_types:username(), rabbit_types:password()) -> 'ok'). +-spec(delete_user/1 :: (rabbit_types:username()) -> 'ok'). +-spec(change_password/2 :: (rabbit_types:username(), rabbit_types:password()) + -> 'ok'). +-spec(clear_password/1 :: (rabbit_types:username()) -> 'ok'). +-spec(make_salt/0 :: () -> binary()). +-spec(check_password/2 :: (rabbit_types:password(), + rabbit_types:password_hash()) -> boolean()). +-spec(change_password_hash/2 :: (rabbit_types:username(), + rabbit_types:password_hash()) -> 'ok'). +-spec(hash_password/1 :: (rabbit_types:password()) + -> rabbit_types:password_hash()). +-spec(set_admin/1 :: (rabbit_types:username()) -> 'ok'). +-spec(clear_admin/1 :: (rabbit_types:username()) -> 'ok'). +-spec(list_users/0 :: () -> [{rabbit_types:username(), boolean()}]). +-spec(lookup_user/1 :: (rabbit_types:username()) + -> rabbit_types:ok(rabbit_types:internal_user()) + | rabbit_types:error('not_found')). +-spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(), + regexp(), regexp(), regexp()) -> 'ok'). +-spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) + -> 'ok'). +-spec(list_permissions/0 :: + () -> [{rabbit_types:username(), rabbit_types:vhost(), + regexp(), regexp(), regexp()}]). +-spec(list_vhost_permissions/1 :: + (rabbit_types:vhost()) -> [{rabbit_types:username(), + regexp(), regexp(), regexp()}]). +-spec(list_user_permissions/1 :: + (rabbit_types:username()) -> [{rabbit_types:vhost(), + regexp(), regexp(), regexp()}]). +-spec(list_user_vhost_permissions/2 :: + (rabbit_types:username(), rabbit_types:vhost()) + -> [{regexp(), regexp(), regexp()}]). + +-endif. + +%%---------------------------------------------------------------------------- + +%% Implementation of rabbit_auth_backend + +description() -> + [{name, <<"Internal">>}, + {description, <<"Internal user / password database">>}]. + +check_user_login(Username, []) -> + internal_check_user_login(Username, fun(_) -> true end); +check_user_login(Username, [{password, Password}]) -> + internal_check_user_login( + Username, + fun(#internal_user{password_hash = Hash}) -> + check_password(Password, Hash) + end); +check_user_login(Username, AuthProps) -> + exit({unknown_auth_props, Username, AuthProps}). + +internal_check_user_login(Username, Fun) -> + Refused = {refused, "user '~s' - invalid credentials", [Username]}, + case lookup_user(Username) of + {ok, User = #internal_user{is_admin = IsAdmin}} -> + case Fun(User) of + true -> {ok, #user{username = Username, + is_admin = IsAdmin, + auth_backend = ?MODULE, + impl = User}}; + _ -> Refused + end; + {error, not_found} -> + Refused + end. 
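The callbacks documented in rabbit_auth_backend above (description/0, check_user_login/2, check_vhost_access/3, check_resource_access/3) can be satisfied by a very small module. A minimal sketch with a hypothetical module name, accepting every login and granting all access; the #user{} construction mirrors internal_check_user_login/2 above, and everything else is an assumption made for illustration:

    -module(rabbit_auth_backend_allow_all_sketch).
    -include("rabbit.hrl").

    -behaviour(rabbit_auth_backend).

    -export([description/0, check_user_login/2, check_vhost_access/3,
             check_resource_access/3]).

    description() ->
        [{name, <<"AllowAllSketch">>},
         {description, <<"Example backend that accepts everything">>}].

    check_user_login(Username, _AuthProps) ->
        {ok, #user{username     = Username,
                   is_admin     = false,
                   auth_backend = ?MODULE,
                   impl         = none}}.

    check_vhost_access(#user{}, _VHostPath, _Permission) -> true.

    check_resource_access(#user{}, _Resource, _Permission) -> true.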
+ +check_vhost_access(#user{is_admin = true}, _VHostPath, read) -> + true; + +check_vhost_access(#user{username = Username}, VHostPath, _) -> + %% TODO: use dirty ops instead + rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:read({rabbit_user_permission, + #user_vhost{username = Username, + virtual_host = VHostPath}}) of + [] -> false; + [_R] -> true + end + end). + +check_resource_access(#user{username = Username}, + #resource{virtual_host = VHostPath, name = Name}, + Permission) -> + case mnesia:dirty_read({rabbit_user_permission, + #user_vhost{username = Username, + virtual_host = VHostPath}}) of + [] -> + false; + [#user_permission{permission = P}] -> + PermRegexp = + case element(permission_index(Permission), P) of + %% <<"^$">> breaks Emacs' erlang mode + <<"">> -> <<$^, $$>>; + RE -> RE + end, + case re:run(Name, PermRegexp, [{capture, none}]) of + match -> true; + nomatch -> false + end + end. + +permission_index(configure) -> #permission.configure; +permission_index(write) -> #permission.write; +permission_index(read) -> #permission.read. + +%%---------------------------------------------------------------------------- +%% Manipulation of the user database + +add_user(Username, Password) -> + R = rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:wread({rabbit_user, Username}) of + [] -> + ok = mnesia:write( + rabbit_user, + #internal_user{username = Username, + password_hash = + hash_password(Password), + is_admin = false}, + write); + _ -> + mnesia:abort({user_already_exists, Username}) + end + end), + rabbit_log:info("Created user ~p~n", [Username]), + R. + +delete_user(Username) -> + R = rabbit_misc:execute_mnesia_transaction( + rabbit_misc:with_user( + Username, + fun () -> + ok = mnesia:delete({rabbit_user, Username}), + [ok = mnesia:delete_object( + rabbit_user_permission, R, write) || + R <- mnesia:match_object( + rabbit_user_permission, + #user_permission{user_vhost = #user_vhost{ + username = Username, + virtual_host = '_'}, + permission = '_'}, + write)], + ok + end)), + rabbit_log:info("Deleted user ~p~n", [Username]), + R. + +change_password(Username, Password) -> + change_password_hash(Username, hash_password(Password)). + +clear_password(Username) -> + change_password_hash(Username, <<"">>). + +change_password_hash(Username, PasswordHash) -> + R = update_user(Username, fun(User) -> + User#internal_user{ + password_hash = PasswordHash } + end), + rabbit_log:info("Changed password for user ~p~n", [Username]), + R. + +hash_password(Cleartext) -> + Salt = make_salt(), + Hash = salted_md5(Salt, Cleartext), + <>. + +check_password(Cleartext, <>) -> + Hash =:= salted_md5(Salt, Cleartext). + +make_salt() -> + {A1,A2,A3} = now(), + random:seed(A1, A2, A3), + Salt = random:uniform(16#ffffffff), + <>. + +salted_md5(Salt, Cleartext) -> + Salted = <>, + erlang:md5(Salted). + +set_admin(Username) -> + set_admin(Username, true). + +clear_admin(Username) -> + set_admin(Username, false). + +set_admin(Username, IsAdmin) -> + R = update_user(Username, fun(User) -> + User#internal_user{is_admin = IsAdmin} + end), + rabbit_log:info("Set user admin flag for user ~p to ~p~n", + [Username, IsAdmin]), + R. + +update_user(Username, Fun) -> + rabbit_misc:execute_mnesia_transaction( + rabbit_misc:with_user( + Username, + fun () -> + {ok, User} = lookup_user(Username), + ok = mnesia:write(rabbit_user, Fun(User), write) + end)). 
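The internal backend stores a password as a 4-byte random salt followed by md5(Salt ++ Cleartext): make_salt/0 produces the salt, hash_password/1 builds the stored binary, and check_password/2 splits that binary back into salt and digest and recomputes. A self-contained restatement of the same scheme with the binary construction written out explicitly (standalone function names, chosen here for illustration):

    make_salt_sketch() ->
        {A1, A2, A3} = now(),
        random:seed(A1, A2, A3),
        <<(random:uniform(16#ffffffff)):32>>.           %% 4 random bytes

    hash_password_sketch(Cleartext) ->
        Salt = make_salt_sketch(),
        Hash = erlang:md5(<<Salt/binary, Cleartext/binary>>),
        <<Salt/binary, Hash/binary>>.                    %% salt ++ digest is what gets stored

    check_password_sketch(Cleartext, <<Salt:4/binary, Hash/binary>>) ->
        Hash =:= erlang:md5(<<Salt/binary, Cleartext/binary>>).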
+ +list_users() -> + [{Username, IsAdmin} || + #internal_user{username = Username, is_admin = IsAdmin} <- + mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})]. + +lookup_user(Username) -> + rabbit_misc:dirty_read({rabbit_user, Username}). + +validate_regexp(RegexpBin) -> + Regexp = binary_to_list(RegexpBin), + case re:compile(Regexp) of + {ok, _} -> ok; + {error, Reason} -> throw({error, {invalid_regexp, Regexp, Reason}}) + end. + +set_permissions(Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm) -> + lists:map(fun validate_regexp/1, [ConfigurePerm, WritePerm, ReadPerm]), + rabbit_misc:execute_mnesia_transaction( + rabbit_misc:with_user_and_vhost( + Username, VHostPath, + fun () -> ok = mnesia:write( + rabbit_user_permission, + #user_permission{user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostPath}, + permission = #permission{ + configure = ConfigurePerm, + write = WritePerm, + read = ReadPerm}}, + write) + end)). + + +clear_permissions(Username, VHostPath) -> + rabbit_misc:execute_mnesia_transaction( + rabbit_misc:with_user_and_vhost( + Username, VHostPath, + fun () -> + ok = mnesia:delete({rabbit_user_permission, + #user_vhost{username = Username, + virtual_host = VHostPath}}) + end)). + +list_permissions() -> + [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || + {Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- + list_permissions(match_user_vhost('_', '_'))]. + +list_vhost_permissions(VHostPath) -> + [{Username, ConfigurePerm, WritePerm, ReadPerm} || + {Username, _, ConfigurePerm, WritePerm, ReadPerm} <- + list_permissions(rabbit_vhost:with( + VHostPath, match_user_vhost('_', VHostPath)))]. + +list_user_permissions(Username) -> + [{VHostPath, ConfigurePerm, WritePerm, ReadPerm} || + {_, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- + list_permissions(rabbit_misc:with_user( + Username, match_user_vhost(Username, '_')))]. + +list_user_vhost_permissions(Username, VHostPath) -> + [{ConfigurePerm, WritePerm, ReadPerm} || + {_, _, ConfigurePerm, WritePerm, ReadPerm} <- + list_permissions(rabbit_misc:with_user_and_vhost( + Username, VHostPath, + match_user_vhost(Username, VHostPath)))]. + +list_permissions(QueryThunk) -> + [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || + #user_permission{user_vhost = #user_vhost{username = Username, + virtual_host = VHostPath}, + permission = #permission{ configure = ConfigurePerm, + write = WritePerm, + read = ReadPerm}} <- + %% TODO: use dirty ops instead + rabbit_misc:execute_mnesia_transaction(QueryThunk)]. + +match_user_vhost(Username, VHostPath) -> + fun () -> mnesia:match_object( + rabbit_user_permission, + #user_permission{user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostPath}, + permission = '_'}, + read) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_amqplain.erl rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_amqplain.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_amqplain.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_amqplain.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,55 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_mechanism_amqplain). +-include("rabbit.hrl"). + +-behaviour(rabbit_auth_mechanism). + +-export([description/0, init/1, handle_response/2]). + +-include("rabbit_auth_mechanism_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "auth mechanism amqplain"}, + {mfa, {rabbit_registry, register, + [auth_mechanism, <<"AMQPLAIN">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +%% AMQPLAIN, as used by Qpid Python test suite. The 0-8 spec actually +%% defines this as PLAIN, but in 0-9 that definition is gone, instead +%% referring generically to "SASL security mechanism", i.e. the above. + +description() -> + [{name, <<"AMQPLAIN">>}, + {description, <<"QPid AMQPLAIN mechanism">>}]. + +init(_Sock) -> + []. + +handle_response(Response, _State) -> + LoginTable = rabbit_binary_parser:parse_table(Response), + case {lists:keysearch(<<"LOGIN">>, 1, LoginTable), + lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of + {{value, {_, longstr, User}}, + {value, {_, longstr, Pass}}} -> + rabbit_access_control:check_user_pass_login(User, Pass); + _ -> + {protocol_error, + "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field", + [LoginTable]} + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_cr_demo.erl rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_cr_demo.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_cr_demo.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_cr_demo.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,59 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_mechanism_cr_demo). +-include("rabbit.hrl"). + +-behaviour(rabbit_auth_mechanism). + +-export([description/0, init/1, handle_response/2]). + +-include("rabbit_auth_mechanism_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "auth mechanism cr-demo"}, + {mfa, {rabbit_registry, register, + [auth_mechanism, <<"RABBIT-CR-DEMO">>, + ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +-record(state, {username = undefined}). + +%% Provides equivalent security to PLAIN but demos use of Connection.Secure(Ok) +%% START-OK: Username +%% SECURE: "Please tell me your password" +%% SECURE-OK: "My password is ~s", [Password] + +description() -> + [{name, <<"RABBIT-CR-DEMO">>}, + {description, <<"RabbitMQ Demo challenge-response authentication " + "mechanism">>}]. + +init(_Sock) -> + #state{}. 
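The comment above describes the RABBIT-CR-DEMO exchange; in terms of the handle_response/2 clauses that follow, one round looks roughly like this sketch (socket and credentials are placeholders):

    %% Sketch of the challenge/response flow; not part of the module.
    State0 = rabbit_auth_mechanism_cr_demo:init(fake_sock),
    {challenge, <<"Please tell me your password">>, State1} =
        rabbit_auth_mechanism_cr_demo:handle_response(<<"guest">>, State0),
    %% The client then answers the challenge in its SECURE-OK frame:
    rabbit_auth_mechanism_cr_demo:handle_response(<<"My password is guest">>, State1).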
+ +handle_response(Response, State = #state{username = undefined}) -> + {challenge, <<"Please tell me your password">>, + State#state{username = Response}}; + +handle_response(Response, #state{username = Username}) -> + case Response of + <<"My password is ", Password/binary>> -> + rabbit_access_control:check_user_pass_login(Username, Password); + _ -> + {protocol_error, "Invalid response '~s'", [Response]} + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_mechanism.erl rabbitmq-server-2.3.1/src/rabbit_auth_mechanism.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_mechanism.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_mechanism.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,42 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_mechanism). + +-export([behaviour_info/1]). + +behaviour_info(callbacks) -> + [ + %% A description. + {description, 0}, + + %% Called before authentication starts. Should create a state + %% object to be passed through all the stages of authentication. + {init, 1}, + + %% Handle a stage of authentication. Possible responses: + %% {ok, User} + %% Authentication succeeded, and here's the user record. + %% {challenge, Challenge, NextState} + %% Another round is needed. Here's the state I want next time. + %% {protocol_error, Msg, Args} + %% Client got the protocol wrong. Log and die. + %% {refused, Msg, Args} + %% Client failed authentication. Log and die. + {handle_response, 2} + ]; +behaviour_info(_Other) -> + undefined. diff -Nru rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_plain.erl rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_plain.erl --- rabbitmq-server-1.7.2/src/rabbit_auth_mechanism_plain.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_auth_mechanism_plain.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,76 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_auth_mechanism_plain). +-include("rabbit.hrl"). + +-behaviour(rabbit_auth_mechanism). + +-export([description/0, init/1, handle_response/2]). + +-include("rabbit_auth_mechanism_spec.hrl"). 
+
+-rabbit_boot_step({?MODULE,
+                   [{description, "auth mechanism plain"},
+                    {mfa,         {rabbit_registry, register,
+                                   [auth_mechanism, <<"PLAIN">>, ?MODULE]}},
+                    {requires,    rabbit_registry},
+                    {enables,     kernel_ready}]}).
+
+%% SASL PLAIN, as used by the Qpid Java client and our clients. Also,
+%% apparently, by OpenAMQ.
+
+%% TODO: once the minimum erlang becomes R13B03, reimplement this
+%% using the binary module - that makes use of BIFs to do binary
+%% matching and will thus be much faster.
+
+description() ->
+    [{name, <<"PLAIN">>},
+     {description, <<"SASL PLAIN authentication mechanism">>}].
+
+init(_Sock) ->
+    [].
+
+handle_response(Response, _State) ->
+    case extract_user_pass(Response) of
+        {ok, User, Pass} ->
+            rabbit_access_control:check_user_pass_login(User, Pass);
+        error ->
+            {protocol_error, "response ~p invalid", [Response]}
+    end.
+
+extract_user_pass(Response) ->
+    case extract_elem(Response) of
+        {ok, User, Response1} -> case extract_elem(Response1) of
+                                     {ok, Pass, <<>>} -> {ok, User, Pass};
+                                     _                -> error
+                                 end;
+        error                 -> error
+    end.
+
+extract_elem(<<0:8, Rest/binary>>) ->
+    Count = next_null_pos(Rest),
+    <<Elem:Count/binary, Rest1/binary>> = Rest,
+    {ok, Elem, Rest1};
+extract_elem(_) ->
+    error.
+
+next_null_pos(Bin) ->
+    next_null_pos(Bin, 0).
+
+next_null_pos(<<>>, Count)                  -> Count;
+next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count;
+next_null_pos(<<_:8, Rest/binary>>, Count)  -> next_null_pos(Rest, Count + 1).
diff -Nru rabbitmq-server-1.7.2/src/rabbit_backing_queue.erl rabbitmq-server-2.3.1/src/rabbit_backing_queue.erl
--- rabbitmq-server-1.7.2/src/rabbit_backing_queue.erl 1970-01-01 00:00:00.000000000 +0000
+++ rabbitmq-server-2.3.1/src/rabbit_backing_queue.erl 2011-02-03 12:47:35.000000000 +0000
@@ -0,0 +1,128 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved.
+%%
+
+-module(rabbit_backing_queue).
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+    [
+     %% Called on startup with a list of durable queue names. The
+     %% queues aren't being started at this point, but this call
+     %% allows the backing queue to perform any checking necessary for
+     %% the consistency of those queues, or initialise any other
+     %% shared resources.
+     {start, 1},
+
+     %% Called to tear down any state/resources. NB: Implementations
+     %% should not depend on this function being called on shutdown
+     %% and instead should hook into the rabbit supervision hierarchy.
+     {stop, 0},
+
+     %% Initialise the backing queue and its state.
+     {init, 3},
+
+     %% Called on queue shutdown when queue isn't being deleted.
+     {terminate, 1},
+
+     %% Called when the queue is terminating and needs to delete all
+     %% its content.
+     {delete_and_terminate, 1},
+
+     %% Remove all messages in the queue, but not messages which have
+     %% been fetched and are pending acks.
+     {purge, 1},
+
+     %% Publish a message.
+     {publish, 3},
+
+     %% Called for messages which have already been passed straight
+     %% out to a client.
The queue will be empty for these calls + %% (i.e. saves the round trip through the backing queue). + {publish_delivered, 4}, + + %% Drop messages from the head of the queue while the supplied + %% predicate returns true. + {dropwhile, 2}, + + %% Produce the next message. + {fetch, 2}, + + %% Acktags supplied are for messages which can now be forgotten + %% about. Must return 1 guid per Ack, in the same order as Acks. + {ack, 2}, + + %% A publish, but in the context of a transaction. + {tx_publish, 4}, + + %% Acks, but in the context of a transaction. + {tx_ack, 3}, + + %% Undo anything which has been done in the context of the + %% specified transaction. + {tx_rollback, 2}, + + %% Commit a transaction. The Fun passed in must be called once + %% the messages have really been commited. This CPS permits the + %% possibility of commit coalescing. + {tx_commit, 4}, + + %% Reinsert messages into the queue which have already been + %% delivered and were pending acknowledgement. + {requeue, 3}, + + %% How long is my queue? + {len, 1}, + + %% Is my queue empty? + {is_empty, 1}, + + %% For the next three functions, the assumption is that you're + %% monitoring something like the ingress and egress rates of the + %% queue. The RAM duration is thus the length of time represented + %% by the messages held in RAM given the current rates. If you + %% want to ignore all of this stuff, then do so, and return 0 in + %% ram_duration/1. + + %% The target is to have no more messages in RAM than indicated + %% by the duration and the current queue rates. + {set_ram_duration_target, 2}, + + %% Optionally recalculate the duration internally (likely to be + %% just update your internal rates), and report how many seconds + %% the messages in RAM represent given the current rates of the + %% queue. + {ram_duration, 1}, + + %% Should 'idle_timeout' be called as soon as the queue process + %% can manage (either on an empty mailbox, or when a timer + %% fires)? + {needs_idle_timeout, 1}, + + %% Called (eventually) after needs_idle_timeout returns + %% 'true'. Note this may be called more than once for each 'true' + %% returned from needs_idle_timeout. + {idle_timeout, 1}, + + %% Called immediately before the queue hibernates. + {handle_pre_hibernate, 1}, + + %% Exists for debugging purposes, to be able to expose state via + %% rabbitmqctl list_queues backing_queue_status + {status, 1} + ]; +behaviour_info(_Other) -> + undefined. diff -Nru rabbitmq-server-1.7.2/src/rabbit_basic.erl rabbitmq-server-2.3.1/src/rabbit_basic.erl --- rabbitmq-server-1.7.2/src/rabbit_basic.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_basic.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,62 +1,64 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. 
%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_basic). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([publish/1, message/4, properties/1, delivery/4]). +-export([publish/1, message/4, properties/1, delivery/5]). -export([publish/4, publish/7]). -export([build_content/2, from_content/1]). +-export([is_message_persistent/1]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --type(properties_input() :: (amqp_properties() | [{atom(), any()}])). --type(publish_result() :: ({ok, routing_result(), [pid()]} | not_found())). - --spec(publish/1 :: (delivery()) -> publish_result()). --spec(delivery/4 :: (boolean(), boolean(), maybe(txn()), message()) -> - delivery()). --spec(message/4 :: (exchange_name(), routing_key(), properties_input(), - binary()) -> message()). --spec(properties/1 :: (properties_input()) -> amqp_properties()). --spec(publish/4 :: (exchange_name(), routing_key(), properties_input(), - binary()) -> publish_result()). --spec(publish/7 :: (exchange_name(), routing_key(), boolean(), boolean(), - maybe(txn()), properties_input(), binary()) -> - publish_result()). --spec(build_content/2 :: (amqp_properties(), binary()) -> content()). --spec(from_content/1 :: (content()) -> {amqp_properties(), binary()}). +-type(properties_input() :: + (rabbit_framing:amqp_property_record() | [{atom(), any()}])). +-type(publish_result() :: + ({ok, rabbit_router:routing_result(), [pid()]} + | rabbit_types:error('not_found'))). + +-spec(publish/1 :: + (rabbit_types:delivery()) -> publish_result()). +-spec(delivery/5 :: + (boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), + rabbit_types:message(), undefined | integer()) -> + rabbit_types:delivery()). +-spec(message/4 :: + (rabbit_exchange:name(), rabbit_router:routing_key(), + properties_input(), binary()) -> + (rabbit_types:message() | rabbit_types:error(any()))). +-spec(properties/1 :: + (properties_input()) -> rabbit_framing:amqp_property_record()). +-spec(publish/4 :: + (rabbit_exchange:name(), rabbit_router:routing_key(), + properties_input(), binary()) -> publish_result()). +-spec(publish/7 :: + (rabbit_exchange:name(), rabbit_router:routing_key(), + boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), + properties_input(), binary()) -> publish_result()). 
+-spec(build_content/2 :: (rabbit_framing:amqp_property_record(), binary()) -> + rabbit_types:content()). +-spec(from_content/1 :: (rabbit_types:content()) -> + {rabbit_framing:amqp_property_record(), binary()}). +-spec(is_message_persistent/1 :: (rabbit_types:decoded_content()) -> + (boolean() | + {'invalid', non_neg_integer()})). -endif. @@ -72,15 +74,18 @@ Other end. -delivery(Mandatory, Immediate, Txn, Message) -> +delivery(Mandatory, Immediate, Txn, Message, MsgSeqNo) -> #delivery{mandatory = Mandatory, immediate = Immediate, txn = Txn, - sender = self(), message = Message}. + sender = self(), message = Message, msg_seq_no = MsgSeqNo}. build_content(Properties, BodyBin) -> - {ClassId, _MethodId} = rabbit_framing:method_id('basic.publish'), + %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1 + {ClassId, _MethodId} = + rabbit_framing_amqp_0_9_1:method_id('basic.publish'), #content{class_id = ClassId, properties = Properties, properties_bin = none, + protocol = none, payload_fragments_rev = [BodyBin]}. from_content(Content) -> @@ -88,15 +93,24 @@ properties = Props, payload_fragments_rev = FragmentsRev} = rabbit_binary_parser:ensure_content_decoded(Content), - {ClassId, _MethodId} = rabbit_framing:method_id('basic.publish'), + %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1 + {ClassId, _MethodId} = + rabbit_framing_amqp_0_9_1:method_id('basic.publish'), {Props, list_to_binary(lists:reverse(FragmentsRev))}. message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> Properties = properties(RawProperties), - #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKeyBin, - content = build_content(Properties, BodyBin), - persistent_key = none}. + Content = build_content(Properties, BodyBin), + case is_message_persistent(Content) of + {invalid, Other} -> + {error, {invalid_delivery_mode, Other}}; + IsPersistent when is_boolean(IsPersistent) -> + #basic_message{exchange_name = ExchangeName, + routing_key = RoutingKeyBin, + content = Content, + guid = rabbit_guid:guid(), + is_persistent = IsPersistent} + end. properties(P = #'P_basic'{}) -> P; @@ -129,4 +143,14 @@ BodyBin) -> publish(delivery(Mandatory, Immediate, Txn, message(ExchangeName, RoutingKeyBin, - properties(Properties), BodyBin))). + properties(Properties), BodyBin), + undefined)). + +is_message_persistent(#content{properties = #'P_basic'{ + delivery_mode = Mode}}) -> + case Mode of + 1 -> false; + 2 -> true; + undefined -> false; + Other -> {invalid, Other} + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_binary_generator.erl rabbitmq-server-2.3.1/src/rabbit_binary_generator.erl --- rabbitmq-server-1.7.2/src/rabbit_binary_generator.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_binary_generator.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_binary_generator). @@ -41,14 +26,13 @@ % See definition of check_empty_content_body_frame_size/0, an assertion called at startup. -define(EMPTY_CONTENT_BODY_FRAME_SIZE, 8). --export([build_simple_method_frame/2, - build_simple_content_frames/3, +-export([build_simple_method_frame/3, + build_simple_content_frames/4, build_heartbeat_frame/0]). -export([generate_table/1, encode_properties/2]). -export([check_empty_content_body_frame_size/0]). --export([ensure_content_encoded/1, clear_encoded_content/1]). - --import(lists). +-export([ensure_content_encoded/2, clear_encoded_content/1]). +-export([map_exception/3]). %%---------------------------------------------------------------------------- @@ -56,72 +40,84 @@ -type(frame() :: [binary()]). --spec(build_simple_method_frame/2 :: - (channel_number(), amqp_method()) -> frame()). --spec(build_simple_content_frames/3 :: - (channel_number(), content(), non_neg_integer()) -> [frame()]). +-spec(build_simple_method_frame/3 :: + (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(), + rabbit_types:protocol()) + -> frame()). +-spec(build_simple_content_frames/4 :: + (rabbit_channel:channel_number(), rabbit_types:content(), + non_neg_integer(), rabbit_types:protocol()) + -> [frame()]). -spec(build_heartbeat_frame/0 :: () -> frame()). --spec(generate_table/1 :: (amqp_table()) -> binary()). --spec(encode_properties/2 :: ([amqp_property_type()], [any()]) -> binary()). +-spec(generate_table/1 :: (rabbit_framing:amqp_table()) -> binary()). +-spec(encode_properties/2 :: + ([rabbit_framing:amqp_property_type()], [any()]) -> binary()). -spec(check_empty_content_body_frame_size/0 :: () -> 'ok'). --spec(ensure_content_encoded/1 :: (content()) -> encoded_content()). --spec(clear_encoded_content/1 :: (content()) -> unencoded_content()). +-spec(ensure_content_encoded/2 :: + (rabbit_types:content(), rabbit_types:protocol()) -> + rabbit_types:encoded_content()). +-spec(clear_encoded_content/1 :: + (rabbit_types:content()) -> rabbit_types:unencoded_content()). 
+-spec(map_exception/3 :: (rabbit_channel:channel_number(),
+                          rabbit_types:amqp_error() | any(),
+                          rabbit_types:protocol()) ->
+                              {boolean(),
+                               rabbit_channel:channel_number(),
+                               rabbit_framing:amqp_method_record()}).
 
 -endif.
 
 %%----------------------------------------------------------------------------
 
-build_simple_method_frame(ChannelInt, MethodRecord) ->
-    MethodFields = rabbit_framing:encode_method_fields(MethodRecord),
+build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
+    MethodFields = Protocol:encode_method_fields(MethodRecord),
     MethodName = rabbit_misc:method_record_type(MethodRecord),
-    {ClassId, MethodId} = rabbit_framing:method_id(MethodName),
+    {ClassId, MethodId} = Protocol:method_id(MethodName),
     create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
 
-build_simple_content_frames(ChannelInt,
-                            #content{class_id = ClassId,
-                                     properties = ContentProperties,
-                                     properties_bin = ContentPropertiesBin,
-                                     payload_fragments_rev = PayloadFragmentsRev},
-                            FrameMax) ->
-    {BodySize, ContentFrames} = build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
+build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
+    #content{class_id = ClassId,
+             properties_bin = ContentPropertiesBin,
+             payload_fragments_rev = PayloadFragmentsRev} =
+        ensure_content_encoded(Content, Protocol),
+    {BodySize, ContentFrames} =
+        build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
     HeaderFrame = create_frame(2, ChannelInt,
                                [<<ClassId:16, 0:16, BodySize:64>>,
-                                maybe_encode_properties(ContentProperties, ContentPropertiesBin)]),
+                                ContentPropertiesBin]),
     [HeaderFrame | ContentFrames].
 
-maybe_encode_properties(_ContentProperties, ContentPropertiesBin)
-  when is_binary(ContentPropertiesBin) ->
-    ContentPropertiesBin;
-maybe_encode_properties(ContentProperties, none) ->
-    rabbit_framing:encode_properties(ContentProperties).
-
-build_content_frames(FragmentsRev, FrameMax, ChannelInt) ->
-    BodyPayloadMax = if
-                         FrameMax == 0 ->
-                             none;
-                         true ->
+build_content_frames(FragsRev, FrameMax, ChannelInt) ->
+    BodyPayloadMax = if FrameMax == 0 ->
+                             iolist_size(FragsRev);
+                        true ->
                              FrameMax - ?EMPTY_CONTENT_BODY_FRAME_SIZE
                      end,
-    build_content_frames(0, [], FragmentsRev, BodyPayloadMax, ChannelInt).
+    build_content_frames(0, [], BodyPayloadMax, [],
+                         lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
 
-build_content_frames(SizeAcc, FragmentAcc, [], _BodyPayloadMax, _ChannelInt) ->
-    {SizeAcc, FragmentAcc};
-build_content_frames(SizeAcc, FragmentAcc, [Fragment | FragmentsRev],
-                     BodyPayloadMax, ChannelInt)
-  when is_number(BodyPayloadMax) and (size(Fragment) > BodyPayloadMax) ->
-    <<Head:BodyPayloadMax/binary, Tail/binary>> = Fragment,
-    build_content_frames(SizeAcc, FragmentAcc, [Tail, Head | FragmentsRev],
-                         BodyPayloadMax, ChannelInt);
-build_content_frames(SizeAcc, FragmentAcc, [<<>> | FragmentsRev],
-                     BodyPayloadMax, ChannelInt) ->
-    build_content_frames(SizeAcc, FragmentAcc, FragmentsRev, BodyPayloadMax, ChannelInt);
-build_content_frames(SizeAcc, FragmentAcc, [Fragment | FragmentsRev],
-                     BodyPayloadMax, ChannelInt) ->
-    build_content_frames(SizeAcc + size(Fragment),
-                         [create_frame(3, ChannelInt, Fragment) | FragmentAcc],
-                         FragmentsRev,
-                         BodyPayloadMax,
-                         ChannelInt).
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+                     [], _BodyPayloadMax, _ChannelInt) ->
+    {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+                     Frags, BodyPayloadMax, ChannelInt)
+  when FragSizeRem == 0 orelse Frags == [] ->
+    Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
+    FrameSize = BodyPayloadMax - FragSizeRem,
+    build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
+                         BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+                     [Frag | Frags], BodyPayloadMax, ChannelInt) ->
+    Size = size(Frag),
+    {NewFragSizeRem, NewFragAcc, NewFrags} =
+        if Size == 0           -> {FragSizeRem, FragAcc, Frags};
+           Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
+           true                -> <<Head:FragSizeRem/binary, Tail/binary>> =
+                                      Frag,
+                                  {0, [Head | FragAcc], [Tail | Frags]}
+        end,
+    build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
+                         NewFrags, BodyPayloadMax, ChannelInt).
 
 build_heartbeat_frame() ->
     create_frame(?FRAME_HEARTBEAT, 0, <<>>).
@@ -273,13 +269,25 @@
                    ComputedSize, ?EMPTY_CONTENT_BODY_FRAME_SIZE})
     end.
 
-ensure_content_encoded(Content = #content{properties_bin = PropsBin})
-  when PropsBin =/= 'none' ->
+ensure_content_encoded(Content = #content{properties_bin = PropBin,
+                                          protocol = Protocol}, Protocol)
+  when PropBin =/= none ->
     Content;
-ensure_content_encoded(Content = #content{properties = Props}) ->
-    Content #content{properties_bin = rabbit_framing:encode_properties(Props)}.
+ensure_content_encoded(Content = #content{properties = none,
+                                          properties_bin = PropBin,
+                                          protocol = Protocol}, Protocol1)
+  when PropBin =/= none ->
+    Props = Protocol:decode_properties(Content#content.class_id, PropBin),
+    Content#content{properties = Props,
+                    properties_bin = Protocol1:encode_properties(Props),
+                    protocol = Protocol1};
+ensure_content_encoded(Content = #content{properties = Props}, Protocol)
+  when Props =/= none ->
+    Content#content{properties_bin = Protocol:encode_properties(Props),
+                    protocol = Protocol}.
 
-clear_encoded_content(Content = #content{properties_bin = none}) ->
+clear_encoded_content(Content = #content{properties_bin = none,
+                                         protocol = none}) ->
     Content;
 clear_encoded_content(Content = #content{properties = none}) ->
     %% Only clear when we can rebuild the properties_bin later in
@@ -287,4 +295,46 @@
     %% one of properties and properties_bin can be 'none'
     Content;
 clear_encoded_content(Content = #content{}) ->
-    Content#content{properties_bin = none}.
+    Content#content{properties_bin = none, protocol = none}.
+
+%% NB: this function is also used by the Erlang client
+map_exception(Channel, Reason, Protocol) ->
+    {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
+        lookup_amqp_exception(Reason, Protocol),
+    ShouldClose = SuggestedClose orelse (Channel == 0),
+    {ClassId, MethodId} = case FailedMethod of
+                              {_, _} -> FailedMethod;
+                              none   -> {0, 0};
+                              _      -> Protocol:method_id(FailedMethod)
+                          end,
+    {CloseChannel, CloseMethod} =
+        case ShouldClose of
+            true  -> {0, #'connection.close'{reply_code = ReplyCode,
+                                             reply_text = ReplyText,
+                                             class_id = ClassId,
+                                             method_id = MethodId}};
+            false -> {Channel, #'channel.close'{reply_code = ReplyCode,
+                                                reply_text = ReplyText,
+                                                class_id = ClassId,
+                                                method_id = MethodId}}
+        end,
+    {ShouldClose, CloseChannel, CloseMethod}.
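With ?EMPTY_CONTENT_BODY_FRAME_SIZE defined as 8, the rewritten build_content_frames above packs a message body into FRAME-BODY frames carrying at most FrameMax - 8 payload bytes each (when FrameMax is 0 the whole body goes out as a single frame). For a hypothetical FrameMax of 4096 and a 10000-byte body the arithmetic works out as in this sketch:

    %% Sketch of the frame-splitting arithmetic only; sizes are made up.
    FrameMax   = 4096,
    PayloadMax = FrameMax - 8,                                %% 4088
    BodySize   = 10000,
    NFrames    = (BodySize + PayloadMax - 1) div PayloadMax,  %% 3 frames
    LastSize   = BodySize - (NFrames - 1) * PayloadMax.       %% 4088 + 4088 + 1824 bytes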
+
+lookup_amqp_exception(#amqp_error{name        = Name,
+                                  explanation = Expl,
+                                  method      = Method},
+                      Protocol) ->
+    {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
+    ExplBin = amqp_exception_explanation(Text, Expl),
+    {ShouldClose, Code, ExplBin, Method};
+lookup_amqp_exception(Other, Protocol) ->
+    rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
+    {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
+    {ShouldClose, Code, Text, none}.
+
+amqp_exception_explanation(Text, Expl) ->
+    ExplBin = list_to_binary(Expl),
+    CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
+    if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
+       true                        -> CompleteTextBin
+    end.
diff -Nru rabbitmq-server-1.7.2/src/rabbit_binary_parser.erl rabbitmq-server-2.3.1/src/rabbit_binary_parser.erl
--- rabbitmq-server-1.7.2/src/rabbit_binary_parser.erl 2010-02-15 16:01:03.000000000 +0000
+++ rabbitmq-server-2.3.1/src/rabbit_binary_parser.erl 2011-02-03 12:47:35.000000000 +0000
@@ -1,32 +1,17 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
 %%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
+%% The Original Code is RabbitMQ.
 %%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developers of the Original Code are LShift Ltd,
-%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd.
-%%
-%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd,
-%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd
-%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial
-%% Technologies LLC, and Rabbit Technologies Ltd.
-%%
-%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift
-%% Ltd. Portions created by Cohesive Financial Technologies LLC are
-%% Copyright (C) 2007-2010 Cohesive Financial Technologies
-%% LLC. Portions created by Rabbit Technologies Ltd are Copyright
-%% (C) 2007-2010 Rabbit Technologies Ltd.
-%%
-%% All Rights Reserved.
-%%
-%% Contributor(s): ______________________________________.
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved.
 %%
 
 -module(rabbit_binary_parser).
@@ -36,16 +21,17 @@
 -export([parse_table/1, parse_properties/2]).
 -export([ensure_content_decoded/1, clear_decoded_content/1]).
 
--import(lists).
-
 %%----------------------------------------------------------------------------
 
 -ifdef(use_specs).
 
--spec(parse_table/1 :: (binary()) -> amqp_table()).
--spec(parse_properties/2 :: ([amqp_property_type()], binary()) -> [any()]).
--spec(ensure_content_decoded/1 :: (content()) -> decoded_content()).
--spec(clear_decoded_content/1 :: (content()) -> undecoded_content()).
+-spec(parse_table/1 :: (binary()) -> rabbit_framing:amqp_table()). +-spec(parse_properties/2 :: + ([rabbit_framing:amqp_property_type()], binary()) -> [any()]). +-spec(ensure_content_decoded/1 :: + (rabbit_types:content()) -> rabbit_types:decoded_content()). +-spec(clear_decoded_content/1 :: + (rabbit_types:content()) -> rabbit_types:undecoded_content()). -endif. @@ -160,11 +146,12 @@ {parse_table(Table), Rest}. ensure_content_decoded(Content = #content{properties = Props}) - when Props =/= 'none' -> + when Props =/= none -> Content; -ensure_content_decoded(Content = #content{properties_bin = PropBin}) - when is_binary(PropBin) -> - Content#content{properties = rabbit_framing:decode_properties( +ensure_content_decoded(Content = #content{properties_bin = PropBin, + protocol = Protocol}) + when PropBin =/= none -> + Content#content{properties = Protocol:decode_properties( Content#content.class_id, PropBin)}. clear_decoded_content(Content = #content{properties = none}) -> diff -Nru rabbitmq-server-1.7.2/src/rabbit_binding.erl rabbitmq-server-2.3.1/src/rabbit_binding.erl --- rabbitmq-server-1.7.2/src/rabbit_binding.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_binding.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,422 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_binding). +-include("rabbit.hrl"). + +-export([recover/0, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([list_for_source/1, list_for_destination/1, + list_for_source_and_destination/2]). +-export([new_deletions/0, combine_deletions/2, add_deletion/3, + process_deletions/2]). +-export([info_keys/0, info/1, info/2, info_all/1, info_all/2]). +%% these must all be run inside a mnesia tx +-export([has_for_source/1, remove_for_source/1, + remove_for_destination/1, remove_transient_for_destination/1]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([key/0, deletions/0]). + +-type(key() :: binary()). + +-type(bind_errors() :: rabbit_types:error('source_not_found' | + 'destination_not_found' | + 'source_and_destination_not_found')). +-type(bind_res() :: 'ok' | bind_errors()). +-type(inner_fun() :: + fun((rabbit_types:exchange(), + rabbit_types:exchange() | rabbit_types:amqqueue()) -> + rabbit_types:ok_or_error(rabbit_types:amqp_error()))). +-type(bindings() :: [rabbit_types:binding()]). +-type(add_res() :: bind_res() | rabbit_misc:const(bind_res())). +-type(bind_or_error() :: bind_res() | rabbit_types:error('binding_not_found')). +-type(remove_res() :: bind_or_error() | rabbit_misc:const(bind_or_error())). + +-opaque(deletions() :: dict()). + +-spec(recover/0 :: () -> [rabbit_types:binding()]). +-spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). +-spec(add/1 :: (rabbit_types:binding()) -> add_res()). +-spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). 
+-spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> add_res()). +-spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> remove_res()). +-spec(list/1 :: (rabbit_types:vhost()) -> bindings()). +-spec(list_for_source/1 :: + (rabbit_types:binding_source()) -> bindings()). +-spec(list_for_destination/1 :: + (rabbit_types:binding_destination()) -> bindings()). +-spec(list_for_source_and_destination/2 :: + (rabbit_types:binding_source(), rabbit_types:binding_destination()) -> + bindings()). +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (rabbit_types:binding()) -> rabbit_types:infos()). +-spec(info/2 :: (rabbit_types:binding(), rabbit_types:info_keys()) -> + rabbit_types:infos()). +-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). +-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) + -> [rabbit_types:infos()]). +-spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()). +-spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). +-spec(remove_for_destination/1 :: + (rabbit_types:binding_destination()) -> deletions()). +-spec(remove_transient_for_destination/1 :: + (rabbit_types:binding_destination()) -> deletions()). +-spec(process_deletions/2 :: (deletions(), boolean()) -> 'ok'). +-spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). +-spec(add_deletion/3 :: (rabbit_exchange:name(), + {'undefined' | rabbit_types:exchange(), + 'deleted' | 'not_deleted', + bindings()}, deletions()) -> deletions()). +-spec(new_deletions/0 :: () -> deletions()). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(INFO_KEYS, [source_name, source_kind, + destination_name, destination_kind, + routing_key, arguments]). + +recover() -> + rabbit_misc:table_fold( + fun (Route = #route{binding = B}, Acc) -> + {_, ReverseRoute} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, ReverseRoute, write), + [B | Acc] + end, [], rabbit_durable_route). + +exists(Binding) -> + binding_action( + Binding, fun (_Src, _Dst, B) -> + rabbit_misc:const(mnesia:read({rabbit_route, B}) /= []) + end). + +add(Binding) -> add(Binding, fun (_Src, _Dst) -> ok end). + +remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). + +add(Binding, InnerFun) -> + binding_action( + Binding, + fun (Src, Dst, B) -> + %% this argument is used to check queue exclusivity; + %% in general, we want to fail on that in preference to + %% anything else + case InnerFun(Src, Dst) of + ok -> + case mnesia:read({rabbit_route, B}) of + [] -> ok = sync_binding(B, all_durable([Src, Dst]), + fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback( + Src, add_binding, [Tx, Src, B]), + rabbit_event:notify_if( + not Tx, binding_created, info(B)) + end; + [_] -> fun rabbit_misc:const_ok/1 + end; + {error, _} = Err -> + rabbit_misc:const(Err) + end + end). 
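add/2 above checks the caller-supplied fun (used for queue exclusivity), writes the route inside an mnesia transaction, and hands the post-commit work back as a continuation run by rabbit_misc:execute_mnesia_tx_with_tail. From the caller's side the API reduces to something like this sketch, assuming an exchange my.exchange and a queue my.queue already exist in vhost <<"/">>, with rabbit_misc:r/3 building the #resource{} names:

    %% Sketch only; resource names are invented.
    B = #binding{source      = rabbit_misc:r(<<"/">>, exchange, <<"my.exchange">>),
                 destination = rabbit_misc:r(<<"/">>, queue,    <<"my.queue">>),
                 key         = <<"some.routing.key">>,
                 args        = []},
    ok   = rabbit_binding:add(B),
    true = rabbit_binding:exists(B),
    ok   = rabbit_binding:remove(B).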
+ +remove(Binding, InnerFun) -> + binding_action( + Binding, + fun (Src, Dst, B) -> + Result = + case mnesia:match_object(rabbit_route, #route{binding = B}, + write) of + [] -> + {error, binding_not_found}; + [_] -> + case InnerFun(Src, Dst) of + ok -> + ok = sync_binding(B, all_durable([Src, Dst]), + fun mnesia:delete_object/3), + {ok, maybe_auto_delete(B#binding.source, + [B], new_deletions())}; + {error, _} = E -> + E + end + end, + case Result of + {error, _} = Err -> + rabbit_misc:const(Err); + {ok, Deletions} -> + fun (Tx) -> ok = process_deletions(Deletions, Tx) end + end + end). + +list(VHostPath) -> + VHostResource = rabbit_misc:r(VHostPath, '_'), + Route = #route{binding = #binding{source = VHostResource, + destination = VHostResource, + _ = '_'}, + _ = '_'}, + [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route, + Route)]. + +list_for_source(SrcName) -> + Route = #route{binding = #binding{source = SrcName, _ = '_'}}, + [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route, + Route)]. + +list_for_destination(DstName) -> + Route = #route{binding = #binding{destination = DstName, _ = '_'}}, + [reverse_binding(B) || #reverse_route{reverse_binding = B} <- + mnesia:dirty_match_object(rabbit_reverse_route, + reverse_route(Route))]. + +list_for_source_and_destination(SrcName, DstName) -> + Route = #route{binding = #binding{source = SrcName, + destination = DstName, + _ = '_'}}, + [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route, + Route)]. + +info_keys() -> ?INFO_KEYS. + +map(VHostPath, F) -> + %% TODO: there is scope for optimisation here, e.g. using a + %% cursor, parallelising the function invocation + lists:map(F, list(VHostPath)). + +infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items]. + +i(source_name, #binding{source = SrcName}) -> SrcName#resource.name; +i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind; +i(destination_name, #binding{destination = DstName}) -> DstName#resource.name; +i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind; +i(routing_key, #binding{key = RoutingKey}) -> RoutingKey; +i(arguments, #binding{args = Arguments}) -> Arguments; +i(Item, _) -> throw({bad_argument, Item}). + +info(B = #binding{}) -> infos(?INFO_KEYS, B). + +info(B = #binding{}, Items) -> infos(Items, B). + +info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end). + +info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end). + +has_for_source(SrcName) -> + Match = #route{binding = #binding{source = SrcName, _ = '_'}}, + %% we need to check for durable routes here too in case a bunch of + %% routes to durable queues have been removed temporarily as a + %% result of a node failure + contains(rabbit_route, Match) orelse contains(rabbit_durable_route, Match). + +remove_for_source(SrcName) -> + [begin + ok = mnesia:delete_object(rabbit_reverse_route, + reverse_route(Route), write), + ok = delete_forward_routes(Route), + Route#route.binding + end || Route <- mnesia:match_object( + rabbit_route, + #route{binding = #binding{source = SrcName, + _ = '_'}}, + write)]. + +remove_for_destination(DstName) -> + remove_for_destination(DstName, fun delete_forward_routes/1). + +remove_transient_for_destination(DstName) -> + remove_for_destination(DstName, fun delete_transient_forward_routes/1). 
+ +%%---------------------------------------------------------------------------- + +all_durable(Resources) -> + lists:all(fun (#exchange{durable = D}) -> D; + (#amqqueue{durable = D}) -> D + end, Resources). + +binding_action(Binding = #binding{source = SrcName, + destination = DstName, + args = Arguments}, Fun) -> + call_with_source_and_destination( + SrcName, DstName, + fun (Src, Dst) -> + SortedArgs = rabbit_misc:sort_field_table(Arguments), + Fun(Src, Dst, Binding#binding{args = SortedArgs}) + end). + +sync_binding(Binding, Durable, Fun) -> + ok = case Durable of + true -> Fun(rabbit_durable_route, + #route{binding = Binding}, write); + false -> ok + end, + {Route, ReverseRoute} = route_with_reverse(Binding), + ok = Fun(rabbit_route, Route, write), + ok = Fun(rabbit_reverse_route, ReverseRoute, write), + ok. + +call_with_source_and_destination(SrcName, DstName, Fun) -> + SrcTable = table_for_resource(SrcName), + DstTable = table_for_resource(DstName), + ErrFun = fun (Err) -> rabbit_misc:const(Err) end, + rabbit_misc:execute_mnesia_tx_with_tail( + fun () -> + case {mnesia:read({SrcTable, SrcName}), + mnesia:read({DstTable, DstName})} of + {[Src], [Dst]} -> Fun(Src, Dst); + {[], [_] } -> ErrFun({error, source_not_found}); + {[_], [] } -> ErrFun({error, destination_not_found}); + {[], [] } -> ErrFun({error, + source_and_destination_not_found}) + end + end). + +table_for_resource(#resource{kind = exchange}) -> rabbit_exchange; +table_for_resource(#resource{kind = queue}) -> rabbit_queue. + +contains(Table, MatchHead) -> + continue(mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read)). + +continue('$end_of_table') -> false; +continue({[_|_], _}) -> true; +continue({[], Continuation}) -> continue(mnesia:select(Continuation)). + +remove_for_destination(DstName, FwdDeleteFun) -> + Bindings = + [begin + Route = reverse_route(ReverseRoute), + ok = FwdDeleteFun(Route), + ok = mnesia:delete_object(rabbit_reverse_route, + ReverseRoute, write), + Route#route.binding + end || ReverseRoute + <- mnesia:match_object( + rabbit_reverse_route, + reverse_route(#route{ + binding = #binding{ + destination = DstName, + _ = '_'}}), + write)], + group_bindings_fold(fun maybe_auto_delete/3, new_deletions(), + lists:keysort(#binding.source, Bindings)). + +%% Requires that its input binding list is sorted in exchange-name +%% order, so that the grouping of bindings (for passing to +%% group_bindings_and_auto_delete1) works properly. +group_bindings_fold(_Fun, Acc, []) -> + Acc; +group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs]) -> + group_bindings_fold(Fun, SrcName, Acc, Bs, [B]). + +group_bindings_fold( + Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings) -> + group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings]); +group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) -> + %% Either Removed is [], or its head has a non-matching SrcName. + group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed). + +maybe_auto_delete(XName, Bindings, Deletions) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> + add_deletion(XName, {undefined, not_deleted, Bindings}, Deletions); + [X] -> + add_deletion(XName, {X, not_deleted, Bindings}, + case rabbit_exchange:maybe_auto_delete(X) of + not_deleted -> Deletions; + {deleted, Deletions1} -> combine_deletions( + Deletions, Deletions1) + end) + end. + +delete_forward_routes(Route) -> + ok = mnesia:delete_object(rabbit_route, Route, write), + ok = mnesia:delete_object(rabbit_durable_route, Route, write). 
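Note that sync_binding/3 above writes each binding twice: a #route{} into rabbit_route and, via route_with_reverse/1, a mirrored #reverse_route{} into rabbit_reverse_route. That pairing is what keeps list_for_source/1, list_for_destination/1 and the remove_for_* functions simple match_object scans. Roughly, one logical binding is stored as the pair sketched below (names invented):

    %% Sketch: the two records stored for a single binding.
    #route{binding = #binding{source = XName, destination = QName,
                              key = RKey, args = Args}}
    %% ...and its mirror image, used for lookups by destination:
    #reverse_route{reverse_binding = #reverse_binding{source      = XName,
                                                      destination = QName,
                                                      key         = RKey,
                                                      args        = Args}}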
+ +delete_transient_forward_routes(Route) -> + ok = mnesia:delete_object(rabbit_route, Route, write). + +route_with_reverse(#route{binding = Binding}) -> + route_with_reverse(Binding); +route_with_reverse(Binding = #binding{}) -> + Route = #route{binding = Binding}, + {Route, reverse_route(Route)}. + +reverse_route(#route{binding = Binding}) -> + #reverse_route{reverse_binding = reverse_binding(Binding)}; + +reverse_route(#reverse_route{reverse_binding = Binding}) -> + #route{binding = reverse_binding(Binding)}. + +reverse_binding(#reverse_binding{source = SrcName, + destination = DstName, + key = Key, + args = Args}) -> + #binding{source = SrcName, + destination = DstName, + key = Key, + args = Args}; + +reverse_binding(#binding{source = SrcName, + destination = DstName, + key = Key, + args = Args}) -> + #reverse_binding{source = SrcName, + destination = DstName, + key = Key, + args = Args}. + +%% ---------------------------------------------------------------------------- +%% Binding / exchange deletion abstraction API +%% ---------------------------------------------------------------------------- + +anything_but( NotThis, NotThis, NotThis) -> NotThis; +anything_but( NotThis, NotThis, This) -> This; +anything_but( NotThis, This, NotThis) -> This; +anything_but(_NotThis, This, This) -> This. + +new_deletions() -> dict:new(). + +add_deletion(XName, Entry, Deletions) -> + dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end, + Entry, Deletions). + +combine_deletions(Deletions1, Deletions2) -> + dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end, + Deletions1, Deletions2). + +merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> + {anything_but(undefined, X1, X2), + anything_but(not_deleted, Deleted1, Deleted2), + [Bindings1 | Bindings2]}. + +process_deletions(Deletions, Tx) -> + dict:fold( + fun (_XName, {X, Deleted, Bindings}, ok) -> + FlatBindings = lists:flatten(Bindings), + [rabbit_event:notify_if(not Tx, binding_deleted, info(B)) || + B <- FlatBindings], + case Deleted of + not_deleted -> + rabbit_exchange:callback(X, remove_bindings, + [Tx, X, FlatBindings]); + deleted -> + rabbit_event:notify_if(not Tx, exchange_deleted, + [{name, X#exchange.name}]), + rabbit_exchange:callback(X, delete, [Tx, X, FlatBindings]) + end + end, ok, Deletions). diff -Nru rabbitmq-server-1.7.2/src/rabbit_channel.erl rabbitmq-server-2.3.1/src/rabbit_channel.erl --- rabbitmq-server-1.7.2/src/rabbit_channel.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_channel.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public Licenses -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_channel). @@ -35,64 +20,84 @@ -behaviour(gen_server2). --export([start_link/5, do/2, do/3, shutdown/1]). --export([send_command/2, deliver/4, conserve_memory/2]). +-export([start_link/7, do/2, do/3, flush/1, shutdown/1]). +-export([send_command/2, deliver/4, flushed/2, confirm/2]). -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). +-export([emit_stats/1]). --export([init/1, terminate/2, code_change/3, - handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1]). +-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, + handle_info/2, handle_pre_hibernate/1, prioritise_call/3, + prioritise_cast/2]). -record(ch, {state, channel, reader_pid, writer_pid, limiter_pid, - transaction_id, tx_participants, next_tag, + start_limiter_fun, transaction_id, tx_participants, next_tag, uncommitted_ack_q, unacked_message_q, - username, virtual_host, - most_recently_declared_queue, consumer_mapping}). - --define(HIBERNATE_AFTER_MIN, 1000). --define(DESIRED_HIBERNATE, 10000). + user, virtual_host, most_recently_declared_queue, + consumer_mapping, blocking, queue_collector_pid, stats_timer, + confirm_enabled, publish_seqno, unconfirmed, confirmed}). -define(MAX_PERMISSION_CACHE_SIZE, 12). --define(INFO_KEYS, +-define(STATISTICS_KEYS, [pid, - connection, - number, - user, - vhost, transactional, + confirm, consumer_count, messages_unacknowledged, + messages_unconfirmed, acks_uncommitted, - prefetch_count]). + prefetch_count, + client_flow_blocked]). + +-define(CREATION_EVENT_KEYS, + [pid, + connection, + number, + user, + vhost]). + +-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --spec(start_link/5 :: - (channel_number(), pid(), pid(), username(), vhost()) -> pid()). --spec(do/2 :: (pid(), amqp_method()) -> 'ok'). --spec(do/3 :: (pid(), amqp_method(), maybe(content())) -> 'ok'). +-export_type([channel_number/0]). + +-type(channel_number() :: non_neg_integer()). + +-spec(start_link/7 :: + (channel_number(), pid(), pid(), rabbit_types:user(), + rabbit_types:vhost(), pid(), + fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> + rabbit_types:ok_pid_or_error()). +-spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). +-spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(), + rabbit_types:maybe(rabbit_types:content())) -> 'ok'). 
+-spec(flush/1 :: (pid()) -> 'ok'). -spec(shutdown/1 :: (pid()) -> 'ok'). --spec(send_command/2 :: (pid(), amqp_method()) -> 'ok'). --spec(deliver/4 :: (pid(), ctag(), boolean(), msg()) -> 'ok'). --spec(conserve_memory/2 :: (pid(), boolean()) -> 'ok'). +-spec(send_command/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). +-spec(deliver/4 :: + (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg()) + -> 'ok'). +-spec(flushed/2 :: (pid(), pid()) -> 'ok'). +-spec(confirm/2 ::(pid(), [non_neg_integer()]) -> 'ok'). -spec(list/0 :: () -> [pid()]). --spec(info_keys/0 :: () -> [info_key()]). --spec(info/1 :: (pid()) -> [info()]). --spec(info/2 :: (pid(), [info_key()]) -> [info()]). --spec(info_all/0 :: () -> [[info()]]). --spec(info_all/1 :: ([info_key()]) -> [[info()]]). +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (pid()) -> rabbit_types:infos()). +-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()). +-spec(info_all/0 :: () -> [rabbit_types:infos()]). +-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). +-spec(emit_stats/1 :: (pid()) -> 'ok'). -endif. %%---------------------------------------------------------------------------- -start_link(Channel, ReaderPid, WriterPid, Username, VHost) -> - {ok, Pid} = gen_server2:start_link( - ?MODULE, [Channel, ReaderPid, WriterPid, - Username, VHost], []), - Pid. +start_link(Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, + StartLimiterFun) -> + gen_server2:start_link(?MODULE, [Channel, ReaderPid, WriterPid, User, + VHost, CollectorPid, StartLimiterFun], []). do(Pid, Method) -> do(Pid, Method, none). @@ -100,6 +105,9 @@ do(Pid, Method, Content) -> gen_server2:cast(Pid, {method, Method, Content}). +flush(Pid) -> + gen_server2:call(Pid, flush). + shutdown(Pid) -> gen_server2:cast(Pid, terminate). @@ -109,8 +117,11 @@ deliver(Pid, ConsumerTag, AckRequired, Msg) -> gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}). -conserve_memory(Pid, Conserve) -> - gen_server2:pcast(Pid, 9, {conserve_memory, Conserve}). +flushed(Pid, QPid) -> + gen_server2:cast(Pid, {flushed, QPid}). + +confirm(Pid, MsgSeqNos) -> + gen_server2:cast(Pid, {confirm, MsgSeqNos, self()}). list() -> pg_local:get_members(rabbit_channels). @@ -118,10 +129,10 @@ info_keys() -> ?INFO_KEYS. info(Pid) -> - gen_server2:pcall(Pid, 9, info, infinity). + gen_server2:call(Pid, info, infinity). info(Pid, Items) -> - case gen_server2:pcall(Pid, 9, {info, Items}, infinity) of + case gen_server2:call(Pid, {info, Items}, infinity) of {ok, Res} -> Res; {error, Error} -> throw(Error) end. @@ -132,30 +143,61 @@ info_all(Items) -> rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()). +emit_stats(Pid) -> + gen_server2:cast(Pid, emit_stats). 
+ %%--------------------------------------------------------------------------- -init([Channel, ReaderPid, WriterPid, Username, VHost]) -> +init([Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, + StartLimiterFun]) -> process_flag(trap_exit, true), - link(WriterPid), - rabbit_alarm:register(self(), {?MODULE, conserve_memory, []}), ok = pg_local:join(rabbit_channels, self()), - {ok, #ch{state = starting, - channel = Channel, - reader_pid = ReaderPid, - writer_pid = WriterPid, - limiter_pid = undefined, - transaction_id = none, - tx_participants = sets:new(), - next_tag = 1, - uncommitted_ack_q = queue:new(), - unacked_message_q = queue:new(), - username = Username, - virtual_host = VHost, - most_recently_declared_queue = <<>>, - consumer_mapping = dict:new()}, - hibernate, + StatsTimer = rabbit_event:init_stats_timer(), + State = #ch{state = starting, + channel = Channel, + reader_pid = ReaderPid, + writer_pid = WriterPid, + limiter_pid = undefined, + start_limiter_fun = StartLimiterFun, + transaction_id = none, + tx_participants = sets:new(), + next_tag = 1, + uncommitted_ack_q = queue:new(), + unacked_message_q = queue:new(), + user = User, + virtual_host = VHost, + most_recently_declared_queue = <<>>, + consumer_mapping = dict:new(), + blocking = dict:new(), + queue_collector_pid = CollectorPid, + stats_timer = StatsTimer, + confirm_enabled = false, + publish_seqno = 1, + unconfirmed = gb_trees:empty(), + confirmed = []}, + rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)), + rabbit_event:if_enabled(StatsTimer, + fun() -> internal_emit_stats(State) end), + {ok, State, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. +prioritise_call(Msg, _From, _State) -> + case Msg of + info -> 9; + {info, _Items} -> 9; + _ -> 0 + end. + +prioritise_cast(Msg, _State) -> + case Msg of + emit_stats -> 7; + {confirm, _MsgSeqNos, _QPid} -> 5; + _ -> 0 + end. + +handle_call(flush, _From, State) -> + reply(ok, State); + handle_call(info, _From, State) -> reply(infos(?INFO_KEYS, State), State); @@ -179,17 +221,18 @@ {stop, normal, State#ch{state = terminating}} catch exit:Reason = #amqp_error{} -> - ok = rollback_and_notify(State), MethodName = rabbit_misc:method_record_type(Method), - State#ch.reader_pid ! 
{channel_exit, State#ch.channel, - Reason#amqp_error{method = MethodName}}, - {stop, normal, State#ch{state = terminating}}; + {stop, normal, terminating(Reason#amqp_error{method = MethodName}, + State)}; exit:normal -> {stop, normal, State}; _:Reason -> {stop, {Reason, erlang:get_stacktrace()}, State} end; +handle_cast({flushed, QPid}, State) -> + {noreply, queue_blocked(QPid, State), hibernate}; + handle_cast(terminate, State) -> {stop, normal, State}; @@ -197,29 +240,67 @@ ok = rabbit_writer:send_command(WriterPid, Msg), noreply(State); -handle_cast({deliver, ConsumerTag, AckRequired, Msg}, +handle_cast({deliver, ConsumerTag, AckRequired, + Msg = {_QName, QPid, _MsgId, Redelivered, + #basic_message{exchange_name = ExchangeName, + routing_key = RoutingKey, + content = Content}}}, State = #ch{writer_pid = WriterPid, next_tag = DeliveryTag}) -> - State1 = lock_message(AckRequired, {DeliveryTag, ConsumerTag, Msg}, State), - ok = internal_deliver(WriterPid, true, ConsumerTag, DeliveryTag, Msg), + State1 = lock_message(AckRequired, + ack_record(DeliveryTag, ConsumerTag, Msg), + State), + + M = #'basic.deliver'{consumer_tag = ConsumerTag, + delivery_tag = DeliveryTag, + redelivered = Redelivered, + exchange = ExchangeName#resource.name, + routing_key = RoutingKey}, + rabbit_writer:send_command_and_notify(WriterPid, QPid, self(), M, Content), + + maybe_incr_stats([{QPid, 1}], + case AckRequired of + true -> deliver; + false -> deliver_no_ack + end, State), noreply(State1#ch{next_tag = DeliveryTag + 1}); -handle_cast({conserve_memory, Conserve}, State) -> - ok = clear_permission_cache(), - ok = rabbit_writer:send_command( - State#ch.writer_pid, #'channel.flow'{active = not(Conserve)}), - noreply(State). +handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> + internal_emit_stats(State), + noreply([ensure_stats_timer], + State#ch{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); + +handle_cast({confirm, MsgSeqNos, From}, State) -> + State1 = #ch{confirmed = C} = confirm(MsgSeqNos, From, State), + noreply([send_confirms], State1, case C of [] -> hibernate; _ -> 0 end). -handle_info({'EXIT', WriterPid, Reason = {writer, send_failed, _Error}}, - State = #ch{writer_pid = WriterPid}) -> - State#ch.reader_pid ! {channel_exit, State#ch.channel, Reason}, - {stop, normal, State}; -handle_info({'EXIT', _Pid, Reason}, State) -> - {stop, Reason, State}. +handle_info(timeout, State) -> + noreply(State); -handle_pre_hibernate(State) -> +handle_info({'DOWN', _MRef, process, QPid, Reason}, + State = #ch{unconfirmed = UC}) -> + %% TODO: this does a complete scan and partial rebuild of the + %% tree, which is quite efficient. To do better we'd need to + %% maintain a secondary mapping, from QPids to MsgSeqNos. + {MXs, UC1} = remove_queue_unconfirmed( + gb_trees:next(gb_trees:iterator(UC)), QPid, + {[], UC}, State), + erase_queue_stats(QPid), + State1 = case Reason of + normal -> record_confirms(MXs, State#ch{unconfirmed = UC1}); + _ -> send_nacks(MXs, State#ch{unconfirmed = UC1}) + end, + noreply(queue_blocked(QPid, State1)). + +handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> ok = clear_permission_cache(), - {hibernate, State}. + rabbit_event:if_enabled(StatsTimer, + fun () -> + internal_emit_stats( + State, [{idle_since, now()}]) + end), + StatsTimer1 = rabbit_event:stop_stats_timer(StatsTimer), + {hibernate, State#ch{stats_timer = StatsTimer1}}. 
terminate(_Reason, State = #ch{state = terminating}) -> terminate(State); @@ -227,8 +308,10 @@ terminate(Reason, State) -> Res = rollback_and_notify(State), case Reason of - normal -> ok = Res; - _ -> ok + normal -> ok = Res; + shutdown -> ok = Res; + {shutdown, _Term} -> ok = Res; + _ -> ok end, terminate(State). @@ -237,9 +320,30 @@ %%--------------------------------------------------------------------------- -reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}. +reply(Reply, NewState) -> reply(Reply, [], NewState). + +reply(Reply, Mask, NewState) -> reply(Reply, Mask, NewState, hibernate). + +reply(Reply, Mask, NewState, Timeout) -> + {reply, Reply, next_state(Mask, NewState), Timeout}. + +noreply(NewState) -> noreply([], NewState). -noreply(NewState) -> {noreply, NewState, hibernate}. +noreply(Mask, NewState) -> noreply(Mask, NewState, hibernate). + +noreply(Mask, NewState, Timeout) -> + {noreply, next_state(Mask, NewState), Timeout}. + +next_state(Mask, State) -> + lists:foldl(fun (ensure_stats_timer, State1) -> ensure_stats_timer(State1); + (send_confirms, State1) -> send_confirms(State1) + end, State, [ensure_stats_timer, send_confirms] -- Mask). + +ensure_stats_timer(State = #ch{stats_timer = StatsTimer}) -> + ChPid = self(), + State#ch{stats_timer = rabbit_event:ensure_stats_timer( + StatsTimer, + fun() -> emit_stats(ChPid) end)}. return_ok(State, true, _Msg) -> {noreply, State}; return_ok(State, false, Msg) -> {reply, Msg, State}. @@ -247,23 +351,19 @@ ok_msg(true, _Msg) -> undefined; ok_msg(false, Msg) -> Msg. -return_queue_declare_ok(State, NoWait, Q) -> - NewState = State#ch{most_recently_declared_queue = - (Q#amqqueue.name)#resource.name}, - case NoWait of - true -> {noreply, NewState}; - false -> - {ok, ActualName, MessageCount, ConsumerCount} = - rabbit_misc:with_exit_handler( - fun () -> {ok, Q#amqqueue.name, 0, 0} end, - fun () -> rabbit_amqqueue:stat(Q) end), - Reply = #'queue.declare_ok'{queue = ActualName#resource.name, - message_count = MessageCount, - consumer_count = ConsumerCount}, - {reply, Reply, NewState} - end. +terminating(Reason, State = #ch{channel = Channel, reader_pid = Reader}) -> + ok = rollback_and_notify(State), + Reader ! {channel_exit, Channel, Reason}, + State#ch{state = terminating}. + +return_queue_declare_ok(#resource{name = ActualName}, + NoWait, MessageCount, ConsumerCount, State) -> + return_ok(State#ch{most_recently_declared_queue = ActualName}, NoWait, + #'queue.declare_ok'{queue = ActualName, + message_count = MessageCount, + consumer_count = ConsumerCount}). -check_resource_access(Username, Resource, Perm) -> +check_resource_access(User, Resource, Perm) -> V = {Resource, Perm}, Cache = case get(permission_cache) of undefined -> []; @@ -273,7 +373,7 @@ case lists:member(V, Cache) of true -> lists:delete(V, Cache); false -> ok = rabbit_access_control:check_resource_access( - Username, Resource, Perm), + User, Resource, Perm), lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1) end, put(permission_cache, [V | CacheTail]), @@ -283,34 +383,65 @@ erase(permission_cache), ok. -check_configure_permitted(Resource, #ch{ username = Username}) -> - check_resource_access(Username, Resource, configure). +check_configure_permitted(Resource, #ch{user = User}) -> + check_resource_access(User, Resource, configure). -check_write_permitted(Resource, #ch{ username = Username}) -> - check_resource_access(Username, Resource, write). +check_write_permitted(Resource, #ch{user = User}) -> + check_resource_access(User, Resource, write). 
-check_read_permitted(Resource, #ch{ username = Username}) -> - check_resource_access(Username, Resource, read). +check_read_permitted(Resource, #ch{user = User}) -> + check_resource_access(User, Resource, read). -expand_queue_name_shortcut(<<>>, #ch{ most_recently_declared_queue = <<>> }) -> +check_user_id_header(#'P_basic'{user_id = undefined}, _) -> + ok; +check_user_id_header(#'P_basic'{user_id = Username}, + #ch{user = #user{username = Username}}) -> + ok; +check_user_id_header(#'P_basic'{user_id = Claimed}, + #ch{user = #user{username = Actual}}) -> rabbit_misc:protocol_error( - not_allowed, "no previously declared queue", []); -expand_queue_name_shortcut(<<>>, #ch{ virtual_host = VHostPath, - most_recently_declared_queue = MRDQ }) -> + precondition_failed, "user_id property set to '~s' but " + "authenticated user was '~s'", [Claimed, Actual]). + +check_internal_exchange(#exchange{name = Name, internal = true}) -> + rabbit_misc:protocol_error(access_refused, + "cannot publish to internal ~s", + [rabbit_misc:rs(Name)]); +check_internal_exchange(_) -> + ok. + +expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = <<>>}) -> + rabbit_misc:protocol_error( + not_found, "no previously declared queue", []); +expand_queue_name_shortcut(<<>>, #ch{virtual_host = VHostPath, + most_recently_declared_queue = MRDQ}) -> rabbit_misc:r(VHostPath, queue, MRDQ); -expand_queue_name_shortcut(QueueNameBin, #ch{ virtual_host = VHostPath }) -> +expand_queue_name_shortcut(QueueNameBin, #ch{virtual_host = VHostPath}) -> rabbit_misc:r(VHostPath, queue, QueueNameBin). expand_routing_key_shortcut(<<>>, <<>>, - #ch{ most_recently_declared_queue = <<>> }) -> + #ch{most_recently_declared_queue = <<>>}) -> rabbit_misc:protocol_error( - not_allowed, "no previously declared queue", []); + not_found, "no previously declared queue", []); expand_routing_key_shortcut(<<>>, <<>>, - #ch{ most_recently_declared_queue = MRDQ }) -> + #ch{most_recently_declared_queue = MRDQ}) -> MRDQ; expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) -> RoutingKey. +expand_binding(queue, DestinationNameBin, RoutingKey, State) -> + {expand_queue_name_shortcut(DestinationNameBin, State), + expand_routing_key_shortcut(DestinationNameBin, RoutingKey, State)}; +expand_binding(exchange, DestinationNameBin, RoutingKey, State) -> + {rabbit_misc:r(State#ch.virtual_host, exchange, DestinationNameBin), + RoutingKey}. + +check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) -> + rabbit_misc:protocol_error( + access_refused, "operation not permitted on the default exchange", []); +check_not_default_exchange(_) -> + ok. + %% check that an exchange/queue name does not contain the reserved %% "amq." prefix. %% @@ -331,6 +462,60 @@ check_name(_Kind, NameBin) -> NameBin. +queue_blocked(QPid, State = #ch{blocking = Blocking}) -> + case dict:find(QPid, Blocking) of + error -> State; + {ok, MRef} -> true = erlang:demonitor(MRef), + Blocking1 = dict:erase(QPid, Blocking), + ok = case dict:size(Blocking1) of + 0 -> rabbit_writer:send_command( + State#ch.writer_pid, + #'channel.flow_ok'{active = false}); + _ -> ok + end, + State#ch{blocking = Blocking1} + end. + +remove_queue_unconfirmed(none, _QPid, Acc, _State) -> + Acc; +remove_queue_unconfirmed({MsgSeqNo, XQ, Next}, QPid, Acc, State) -> + remove_queue_unconfirmed(gb_trees:next(Next), QPid, + remove_qmsg(MsgSeqNo, QPid, XQ, Acc, State), + State). 
+ +record_confirm(undefined, _, State) -> + State; +record_confirm(MsgSeqNo, XName, State) -> + record_confirms([{MsgSeqNo, XName}], State). + +record_confirms([], State) -> + State; +record_confirms(MXs, State = #ch{confirmed = C}) -> + State#ch{confirmed = [MXs | C]}. + +confirm([], _QPid, State) -> + State; +confirm(MsgSeqNos, QPid, State = #ch{unconfirmed = UC}) -> + {MXs, UC1} = + lists:foldl( + fun(MsgSeqNo, {_DMs, UC0} = Acc) -> + case gb_trees:lookup(MsgSeqNo, UC0) of + none -> Acc; + {value, XQ} -> remove_qmsg(MsgSeqNo, QPid, XQ, Acc, State) + end + end, {[], UC}, MsgSeqNos), + record_confirms(MXs, State#ch{unconfirmed = UC1}). + +remove_qmsg(MsgSeqNo, QPid, {XName, Qs}, {MXs, UC}, State) -> + Qs1 = sets:del_element(QPid, Qs), + %% these confirms will be emitted even when a queue dies, but that + %% should be fine, since the queue stats get erased immediately + maybe_incr_stats([{{QPid, XName}, 1}], confirm, State), + case sets:size(Qs1) of + 0 -> {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UC)}; + _ -> {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UC)} + end. + handle_method(#'channel.open'{}, _, State = #ch{state = starting}) -> {reply, #'channel.open_ok'{}, State#ch{state = running}}; @@ -343,66 +528,68 @@ handle_method(#'channel.close'{}, _, State = #ch{writer_pid = WriterPid}) -> ok = rollback_and_notify(State), - ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}), + ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}), stop; handle_method(#'access.request'{},_, State) -> {reply, #'access.request_ok'{ticket = 1}, State}; -handle_method(#'basic.publish'{exchange = ExchangeNameBin, +handle_method(#'basic.publish'{exchange = ExchangeNameBin, routing_key = RoutingKey, - mandatory = Mandatory, - immediate = Immediate}, - Content, State = #ch{ virtual_host = VHostPath, - transaction_id = TxnKey, - writer_pid = WriterPid}) -> + mandatory = Mandatory, + immediate = Immediate}, + Content, State = #ch{virtual_host = VHostPath, + transaction_id = TxnKey, + confirm_enabled = ConfirmEnabled}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_write_permitted(ExchangeName, State), Exchange = rabbit_exchange:lookup_or_die(ExchangeName), + check_internal_exchange(Exchange), %% We decode the content's properties here because we're almost %% certain to want to look at delivery-mode and priority. 
DecodedContent = rabbit_binary_parser:ensure_content_decoded(Content), - PersistentKey = case is_message_persistent(DecodedContent) of - true -> rabbit_guid:guid(); - false -> none - end, - Message = #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, - content = DecodedContent, - persistent_key = PersistentKey}, + check_user_id_header(DecodedContent#content.properties, State), + IsPersistent = is_message_persistent(DecodedContent), + {MsgSeqNo, State1} = + case ConfirmEnabled of + false -> {undefined, State}; + true -> SeqNo = State#ch.publish_seqno, + {SeqNo, State#ch{publish_seqno = SeqNo + 1}} + end, + Message = #basic_message{exchange_name = ExchangeName, + routing_key = RoutingKey, + content = DecodedContent, + guid = rabbit_guid:guid(), + is_persistent = IsPersistent}, {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, - rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message)), - case RoutingRes of - routed -> - ok; - unroutable -> - %% FIXME: 312 should be replaced by the ?NO_ROUTE - %% definition, when we move to >=0-9 - ok = basic_return(Message, WriterPid, 312, <<"unroutable">>); - not_delivered -> - %% FIXME: 313 should be replaced by the ?NO_CONSUMERS - %% definition, when we move to >=0-9 - ok = basic_return(Message, WriterPid, 313, <<"not_delivered">>) - end, + rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, + MsgSeqNo)), + State2 = process_routing_result(RoutingRes, DeliveredQPids, ExchangeName, + MsgSeqNo, Message, State1), + maybe_incr_stats([{ExchangeName, 1} | + [{{QPid, ExchangeName}, 1} || + QPid <- DeliveredQPids]], publish, State2), {noreply, case TxnKey of - none -> State; - _ -> add_tx_participants(DeliveredQPids, State) + none -> State2; + _ -> add_tx_participants(DeliveredQPids, State2) end}; +handle_method(#'basic.nack'{delivery_tag = DeliveryTag, + multiple = Multiple, + requeue = Requeue}, + _, State) -> + reject(DeliveryTag, Requeue, Multiple, State); + handle_method(#'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple}, _, State = #ch{transaction_id = TxnKey, - next_tag = NextDeliveryTag, unacked_message_q = UAMQ}) -> - if DeliveryTag >= NextDeliveryTag -> - rabbit_misc:protocol_error( - command_invalid, "unknown delivery tag ~w", [DeliveryTag]); - true -> ok - end, {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple), - Participants = ack(TxnKey, Acked), + QIncs = ack(TxnKey, Acked), + Participants = [QPid || {QPid, _} <- QIncs], + maybe_incr_stats(QIncs, ack, State), {noreply, case TxnKey of none -> ok = notify_limiter(State#ch.limiter_pid, Acked), State#ch{unacked_message_q = Remaining}; @@ -416,19 +603,27 @@ handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{ writer_pid = WriterPid, - next_tag = DeliveryTag }) -> + _, State = #ch{writer_pid = WriterPid, + reader_pid = ReaderPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), - case rabbit_amqqueue:with_or_die( - QueueName, + case rabbit_amqqueue:with_exclusive_access_or_die( + QueueName, ReaderPid, fun (Q) -> rabbit_amqqueue:basic_get(Q, self(), NoAck) end) of {ok, MessageCount, - Msg = {_QName, _QPid, _MsgId, Redelivered, + Msg = {_QName, QPid, _MsgId, Redelivered, #basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}}} -> - State1 = lock_message(not(NoAck), {DeliveryTag, none, Msg}, State), + State1 = lock_message(not(NoAck), + ack_record(DeliveryTag, none, Msg), + State), + 
maybe_incr_stats([{QPid, 1}], + case NoAck of + true -> get_no_ack; + false -> get + end, State), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, @@ -439,18 +634,18 @@ Content), {noreply, State1#ch{next_tag = DeliveryTag + 1}}; empty -> - {reply, #'basic.get_empty'{cluster_id = <<>>}, State} + {reply, #'basic.get_empty'{}, State} end; -handle_method(#'basic.consume'{queue = QueueNameBin, +handle_method(#'basic.consume'{queue = QueueNameBin, consumer_tag = ConsumerTag, - no_local = _, % FIXME: implement - no_ack = NoAck, - exclusive = ExclusiveConsume, - nowait = NoWait}, - _, State = #ch{ reader_pid = ReaderPid, - limiter_pid = LimiterPid, - consumer_mapping = ConsumerMapping }) -> + no_local = _, % FIXME: implement + no_ack = NoAck, + exclusive = ExclusiveConsume, + nowait = NoWait}, + _, State = #ch{reader_pid = ReaderPid, + limiter_pid = LimiterPid, + consumer_mapping = ConsumerMapping }) -> case dict:find(ConsumerTag, ConsumerMapping) of error -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), @@ -461,14 +656,14 @@ Other -> Other end, - %% In order to ensure that the consume_ok gets sent before - %% any messages are sent to the consumer, we get the queue - %% process to send the consume_ok on our behalf. - case rabbit_amqqueue:with_or_die( - QueueName, + %% We get the queue process to send the consume_ok on our + %% behalf. This is for symmetry with basic.cancel - see + %% the comment in that method for why. + case rabbit_amqqueue:with_exclusive_access_or_die( + QueueName, ReaderPid, fun (Q) -> rabbit_amqqueue:basic_consume( - Q, NoAck, ReaderPid, self(), LimiterPid, + Q, NoAck, self(), LimiterPid, ActualConsumerTag, ExclusiveConsume, ok_msg(NoWait, #'basic.consume_ok'{ consumer_tag = ActualConsumerTag})) @@ -478,14 +673,6 @@ dict:store(ActualConsumerTag, QueueName, ConsumerMapping)}}; - {error, queue_owned_by_another_connection} -> - %% The spec is silent on which exception to use - %% here. This seems reasonable? 
- %% FIXME: check this - - rabbit_misc:protocol_error( - resource_locked, "~s owned by another connection", - [rabbit_misc:rs(QueueName)]); {error, exclusive_consume_unavailable} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -540,81 +727,67 @@ "prefetch_size!=0 (~w)", [Size]); handle_method(#'basic.qos'{prefetch_count = PrefetchCount}, - _, State = #ch{ limiter_pid = LimiterPid, - unacked_message_q = UAMQ }) -> - NewLimiterPid = case {LimiterPid, PrefetchCount} of - {undefined, 0} -> - undefined; - {undefined, _} -> - LPid = rabbit_limiter:start_link(self(), - queue:len(UAMQ)), - ok = limit_queues(LPid, State), - LPid; - {_, 0} -> - ok = rabbit_limiter:shutdown(LimiterPid), - ok = limit_queues(undefined, State), - undefined; - {_, _} -> - LimiterPid - end, - ok = rabbit_limiter:limit(NewLimiterPid, PrefetchCount), - {reply, #'basic.qos_ok'{}, State#ch{limiter_pid = NewLimiterPid}}; - -handle_method(#'basic.recover'{requeue = true}, - _, State = #ch{ transaction_id = none, - unacked_message_q = UAMQ }) -> + _, State = #ch{limiter_pid = LimiterPid}) -> + LimiterPid1 = case {LimiterPid, PrefetchCount} of + {undefined, 0} -> undefined; + {undefined, _} -> start_limiter(State); + {_, _} -> LimiterPid + end, + LimiterPid2 = case rabbit_limiter:limit(LimiterPid1, PrefetchCount) of + ok -> LimiterPid1; + stopped -> unlimit_queues(State) + end, + {reply, #'basic.qos_ok'{}, State#ch{limiter_pid = LimiterPid2}}; + +handle_method(#'basic.recover_async'{requeue = true}, + _, State = #ch{unacked_message_q = UAMQ, + limiter_pid = LimiterPid}) -> + OkFun = fun () -> ok end, ok = fold_per_queue( fun (QPid, MsgIds, ok) -> %% The Qpid python test suite incorrectly assumes %% that messages will be requeued in their original %% order. To keep it happy we reverse the id list %% since we are given them in reverse order. - rabbit_amqqueue:requeue( - QPid, lists:reverse(MsgIds), self()) + rabbit_misc:with_exit_handler( + OkFun, fun () -> + rabbit_amqqueue:requeue( + QPid, lists:reverse(MsgIds), self()) + end) end, ok, UAMQ), - %% No answer required, apparently! + ok = notify_limiter(LimiterPid, UAMQ), + %% No answer required - basic.recover is the newer, synchronous + %% variant of this method {noreply, State#ch{unacked_message_q = queue:new()}}; -handle_method(#'basic.recover'{requeue = false}, - _, State = #ch{ transaction_id = none, - writer_pid = WriterPid, - unacked_message_q = UAMQ }) -> - ok = rabbit_misc:queue_fold( - fun ({_DeliveryTag, none, _Msg}, ok) -> - %% Was sent as a basic.get_ok. Don't redeliver - %% it. FIXME: appropriate? - ok; - ({DeliveryTag, ConsumerTag, - {QName, QPid, MsgId, _Redelivered, Message}}, ok) -> - %% Was sent as a proper consumer delivery. Resend - %% it as before. - %% - %% FIXME: What should happen if the consumer's been - %% cancelled since? - %% - %% FIXME: should we allocate a fresh DeliveryTag? - internal_deliver( - WriterPid, false, ConsumerTag, DeliveryTag, - {QName, QPid, MsgId, true, Message}) - end, ok, UAMQ), - %% No answer required, apparently! 
- {noreply, State}; +handle_method(#'basic.recover_async'{requeue = false}, _, _State) -> + rabbit_misc:protocol_error(not_implemented, "requeue=false", []); -handle_method(#'basic.recover'{}, _, _State) -> - rabbit_misc:protocol_error( - not_allowed, "attempt to recover a transactional channel",[]); +handle_method(#'basic.recover'{requeue = Requeue}, Content, State) -> + {noreply, State2 = #ch{writer_pid = WriterPid}} = + handle_method(#'basic.recover_async'{requeue = Requeue}, + Content, + State), + ok = rabbit_writer:send_command(WriterPid, #'basic.recover_ok'{}), + {noreply, State2}; + +handle_method(#'basic.reject'{delivery_tag = DeliveryTag, + requeue = Requeue}, + _, State) -> + reject(DeliveryTag, Requeue, false, State); handle_method(#'exchange.declare'{exchange = ExchangeNameBin, type = TypeNameBin, passive = false, durable = Durable, auto_delete = AutoDelete, - internal = false, + internal = Internal, nowait = NoWait, arguments = Args}, - _, State = #ch{ virtual_host = VHostPath }) -> + _, State = #ch{virtual_host = VHostPath}) -> CheckedType = rabbit_exchange:check_type(TypeNameBin), ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), + check_not_default_exchange(ExchangeName), check_configure_permitted(ExchangeName, State), X = case rabbit_exchange:lookup(ExchangeName) of {ok, FoundX} -> FoundX; @@ -631,27 +804,29 @@ CheckedType, Durable, AutoDelete, + Internal, Args) end, - ok = rabbit_exchange:assert_type(X, CheckedType), + ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable, + AutoDelete, Internal, Args), return_ok(State, NoWait, #'exchange.declare_ok'{}); handle_method(#'exchange.declare'{exchange = ExchangeNameBin, - type = TypeNameBin, passive = true, nowait = NoWait}, - _, State = #ch{ virtual_host = VHostPath }) -> + _, State = #ch{virtual_host = VHostPath}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_configure_permitted(ExchangeName, State), - X = rabbit_exchange:lookup_or_die(ExchangeName), - ok = rabbit_exchange:assert_type(X, rabbit_exchange:check_type(TypeNameBin)), + check_not_default_exchange(ExchangeName), + _ = rabbit_exchange:lookup_or_die(ExchangeName), return_ok(State, NoWait, #'exchange.declare_ok'{}); handle_method(#'exchange.delete'{exchange = ExchangeNameBin, if_unused = IfUnused, nowait = NoWait}, - _, State = #ch { virtual_host = VHostPath }) -> + _, State = #ch{virtual_host = VHostPath}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), + check_not_default_exchange(ExchangeName), check_configure_permitted(ExchangeName, State), case rabbit_exchange:delete(ExchangeName, IfUnused) of {error, not_found} -> @@ -663,73 +838,97 @@ return_ok(State, NoWait, #'exchange.delete_ok'{}) end; -handle_method(#'queue.declare'{queue = QueueNameBin, - passive = false, - durable = Durable, - exclusive = ExclusiveDeclare, - auto_delete = AutoDelete, +handle_method(#'exchange.bind'{destination = DestinationNameBin, + source = SourceNameBin, + routing_key = RoutingKey, nowait = NoWait, - arguments = Args}, - _, State = #ch { virtual_host = VHostPath, - reader_pid = ReaderPid }) -> - %% FIXME: atomic create&claim - Finish = - fun (Q) -> - if ExclusiveDeclare -> - case rabbit_amqqueue:claim_queue(Q, ReaderPid) of - locked -> - %% AMQP 0-8 doesn't say which - %% exception to use, so we mimic QPid - %% here. 
- rabbit_misc:protocol_error( - resource_locked, - "cannot obtain exclusive access to locked ~s", - [rabbit_misc:rs(Q#amqqueue.name)]); - ok -> ok - end; - true -> - ok - end, - Q - end, - Q = case rabbit_amqqueue:with( - rabbit_misc:r(VHostPath, queue, QueueNameBin), - Finish) of - {error, not_found} -> - ActualNameBin = - case QueueNameBin of + arguments = Arguments}, _, State) -> + binding_action(fun rabbit_binding:add/2, + SourceNameBin, exchange, DestinationNameBin, RoutingKey, + Arguments, #'exchange.bind_ok'{}, NoWait, State); + +handle_method(#'exchange.unbind'{destination = DestinationNameBin, + source = SourceNameBin, + routing_key = RoutingKey, + nowait = NoWait, + arguments = Arguments}, _, State) -> + binding_action(fun rabbit_binding:remove/2, + SourceNameBin, exchange, DestinationNameBin, RoutingKey, + Arguments, #'exchange.unbind_ok'{}, NoWait, State); + +handle_method(#'queue.declare'{queue = QueueNameBin, + passive = false, + durable = Durable, + exclusive = ExclusiveDeclare, + auto_delete = AutoDelete, + nowait = NoWait, + arguments = Args} = Declare, + _, State = #ch{virtual_host = VHostPath, + reader_pid = ReaderPid, + queue_collector_pid = CollectorPid}) -> + Owner = case ExclusiveDeclare of + true -> ReaderPid; + false -> none + end, + ActualNameBin = case QueueNameBin of <<>> -> rabbit_guid:binstring_guid("amq.gen"); Other -> check_name('queue', Other) end, - QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin), - check_configure_permitted(QueueName, State), - Finish(rabbit_amqqueue:declare(QueueName, - Durable, AutoDelete, Args)); - Other = #amqqueue{name = QueueName} -> - check_configure_permitted(QueueName, State), - Other - end, - return_queue_declare_ok(State, NoWait, Q); + QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin), + check_configure_permitted(QueueName, State), + case rabbit_amqqueue:with( + QueueName, + fun (Q) -> ok = rabbit_amqqueue:assert_equivalence( + Q, Durable, AutoDelete, Args, Owner), + rabbit_amqqueue:stat(Q) + end) of + {ok, MessageCount, ConsumerCount} -> + return_queue_declare_ok(QueueName, NoWait, MessageCount, + ConsumerCount, State); + {error, not_found} -> + case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete, + Args, Owner) of + {new, Q = #amqqueue{}} -> + %% We need to notify the reader within the channel + %% process so that we can be sure there are no + %% outstanding exclusive queues being declared as + %% the connection shuts down. + ok = case Owner of + none -> ok; + _ -> rabbit_queue_collector:register( + CollectorPid, Q) + end, + return_queue_declare_ok(QueueName, NoWait, 0, 0, State); + {existing, _Q} -> + %% must have been created between the stat and the + %% declare. Loop around again. 
+ handle_method(Declare, none, State) + end + end; -handle_method(#'queue.declare'{queue = QueueNameBin, +handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, - nowait = NoWait}, - _, State = #ch{ virtual_host = VHostPath }) -> + nowait = NoWait}, + _, State = #ch{virtual_host = VHostPath, + reader_pid = ReaderPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), - Q = rabbit_amqqueue:with_or_die(QueueName, fun (Q) -> Q end), - return_queue_declare_ok(State, NoWait, Q); + {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = + rabbit_amqqueue:with_or_die( + QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), + ok = rabbit_amqqueue:check_exclusive_access(Q, ReaderPid), + return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount, + State); handle_method(#'queue.delete'{queue = QueueNameBin, if_unused = IfUnused, if_empty = IfEmpty, - nowait = NoWait - }, - _, State) -> + nowait = NoWait}, + _, State = #ch{reader_pid = ReaderPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_configure_permitted(QueueName, State), - case rabbit_amqqueue:with_or_die( - QueueName, + case rabbit_amqqueue:with_exclusive_access_or_die( + QueueName, ReaderPid, fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of {error, in_use} -> rabbit_misc:protocol_error( @@ -739,8 +938,7 @@ precondition_failed, "~s not empty", [rabbit_misc:rs(QueueName)]); {ok, PurgedMessageCount} -> return_ok(State, NoWait, - #'queue.delete_ok'{ - message_count = PurgedMessageCount}) + #'queue.delete_ok'{message_count = PurgedMessageCount}) end; handle_method(#'queue.bind'{queue = QueueNameBin, @@ -748,29 +946,34 @@ routing_key = RoutingKey, nowait = NoWait, arguments = Arguments}, _, State) -> - binding_action(fun rabbit_exchange:add_binding/4, ExchangeNameBin, - QueueNameBin, RoutingKey, Arguments, #'queue.bind_ok'{}, - NoWait, State); + binding_action(fun rabbit_binding:add/2, + ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments, + #'queue.bind_ok'{}, NoWait, State); handle_method(#'queue.unbind'{queue = QueueNameBin, exchange = ExchangeNameBin, routing_key = RoutingKey, arguments = Arguments}, _, State) -> - binding_action(fun rabbit_exchange:delete_binding/4, ExchangeNameBin, - QueueNameBin, RoutingKey, Arguments, #'queue.unbind_ok'{}, - false, State); + binding_action(fun rabbit_binding:remove/2, + ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments, + #'queue.unbind_ok'{}, false, State); handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait}, - _, State) -> + _, State = #ch{reader_pid = ReaderPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), - {ok, PurgedMessageCount} = rabbit_amqqueue:with_or_die( - QueueName, + {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die( + QueueName, ReaderPid, fun (Q) -> rabbit_amqqueue:purge(Q) end), return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); + +handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) -> + rabbit_misc:protocol_error( + precondition_failed, "cannot switch from confirm to tx mode", []); + handle_method(#'tx.select'{}, _, State = #ch{transaction_id = none}) -> {reply, #'tx.select_ok'{}, new_tx(State)}; @@ -779,27 +982,52 @@ handle_method(#'tx.commit'{}, _, #ch{transaction_id = none}) -> rabbit_misc:protocol_error( - not_allowed, "channel is not transactional", []); + precondition_failed, "channel is not 
transactional", []); handle_method(#'tx.commit'{}, _, State) -> {reply, #'tx.commit_ok'{}, internal_commit(State)}; handle_method(#'tx.rollback'{}, _, #ch{transaction_id = none}) -> rabbit_misc:protocol_error( - not_allowed, "channel is not transactional", []); + precondition_failed, "channel is not transactional", []); handle_method(#'tx.rollback'{}, _, State) -> {reply, #'tx.rollback_ok'{}, internal_rollback(State)}; -handle_method(#'channel.flow'{active = _}, _, State) -> - %% FIXME: implement - {reply, #'channel.flow_ok'{active = true}, State}; - -handle_method(#'channel.flow_ok'{active = _}, _, State) -> - %% TODO: We may want to correlate this to channel.flow messages we - %% have sent, and complain if we get an unsolicited - %% channel.flow_ok, or the client refuses our flow request. - {noreply, State}; +handle_method(#'confirm.select'{}, _, #ch{transaction_id = TxId}) + when TxId =/= none -> + rabbit_misc:protocol_error( + precondition_failed, "cannot switch from tx to confirm mode", []); + +handle_method(#'confirm.select'{nowait = NoWait}, _, State) -> + return_ok(State#ch{confirm_enabled = true}, + NoWait, #'confirm.select_ok'{}); + +handle_method(#'channel.flow'{active = true}, _, + State = #ch{limiter_pid = LimiterPid}) -> + LimiterPid1 = case rabbit_limiter:unblock(LimiterPid) of + ok -> LimiterPid; + stopped -> unlimit_queues(State) + end, + {reply, #'channel.flow_ok'{active = true}, + State#ch{limiter_pid = LimiterPid1}}; + +handle_method(#'channel.flow'{active = false}, _, + State = #ch{limiter_pid = LimiterPid, + consumer_mapping = Consumers}) -> + LimiterPid1 = case LimiterPid of + undefined -> start_limiter(State); + Other -> Other + end, + State1 = State#ch{limiter_pid = LimiterPid1}, + ok = rabbit_limiter:block(LimiterPid1), + case consumer_queues(Consumers) of + [] -> {reply, #'channel.flow_ok'{active = false}, State1}; + QPids -> Queues = [{QPid, erlang:monitor(process, QPid)} || + QPid <- QPids], + ok = rabbit_amqqueue:flush_all(QPids, self()), + {noreply, State1#ch{blocking = dict:from_list(Queues)}} + end; handle_method(_MethodRecord, _Content, _State) -> rabbit_misc:protocol_error( @@ -807,43 +1035,55 @@ %%---------------------------------------------------------------------------- -binding_action(Fun, ExchangeNameBin, QueueNameBin, RoutingKey, Arguments, - ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath}) -> +binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, + RoutingKey, Arguments, ReturnMethod, NoWait, + State = #ch{virtual_host = VHostPath, + reader_pid = ReaderPid}) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - %% including the one named "" ! 
- QueueName = expand_queue_name_shortcut(QueueNameBin, State), - check_write_permitted(QueueName, State), - ActualRoutingKey = expand_routing_key_shortcut(QueueNameBin, RoutingKey, - State), + {DestinationName, ActualRoutingKey} = + expand_binding(DestinationType, DestinationNameBin, RoutingKey, State), + check_write_permitted(DestinationName, State), ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), + [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]], check_read_permitted(ExchangeName, State), - case Fun(ExchangeName, QueueName, ActualRoutingKey, Arguments) of - {error, exchange_not_found} -> + case Fun(#binding{source = ExchangeName, + destination = DestinationName, + key = ActualRoutingKey, + args = Arguments}, + fun (_X, Q = #amqqueue{}) -> + try rabbit_amqqueue:check_exclusive_access(Q, ReaderPid) + catch exit:Reason -> {error, Reason} + end; + (_X, #exchange{}) -> + ok + end) of + {error, source_not_found} -> rabbit_misc:not_found(ExchangeName); - {error, queue_not_found} -> - rabbit_misc:not_found(QueueName); - {error, exchange_and_queue_not_found} -> + {error, destination_not_found} -> + rabbit_misc:not_found(DestinationName); + {error, source_and_destination_not_found} -> rabbit_misc:protocol_error( not_found, "no ~s and no ~s", [rabbit_misc:rs(ExchangeName), - rabbit_misc:rs(QueueName)]); + rabbit_misc:rs(DestinationName)]); {error, binding_not_found} -> rabbit_misc:protocol_error( not_found, "no binding ~s between ~s and ~s", [RoutingKey, rabbit_misc:rs(ExchangeName), - rabbit_misc:rs(QueueName)]); - {error, durability_settings_incompatible} -> - rabbit_misc:protocol_error( - not_allowed, "durability settings of ~s incompatible with ~s", - [rabbit_misc:rs(QueueName), rabbit_misc:rs(ExchangeName)]); + rabbit_misc:rs(DestinationName)]); + {error, #amqp_error{} = Error} -> + rabbit_misc:protocol_error(Error); ok -> return_ok(State, NoWait, ReturnMethod) end. basic_return(#basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}, - WriterPid, ReplyCode, ReplyText) -> + WriterPid, Reason) -> + {_Close, ReplyCode, ReplyText} = + rabbit_framing_amqp_0_9_1:lookup_amqp_exception(Reason), ok = rabbit_writer:send_command( WriterPid, #'basic.return'{reply_code = ReplyCode, @@ -852,6 +1092,19 @@ routing_key = RoutingKey}, Content). +reject(DeliveryTag, Requeue, Multiple, State = #ch{unacked_message_q = UAMQ}) -> + {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple), + ok = fold_per_queue( + fun (QPid, MsgIds, ok) -> + rabbit_amqqueue:reject(QPid, MsgIds, Requeue, self()) + end, ok, Acked), + ok = notify_limiter(State#ch.limiter_pid, Acked), + {noreply, State#ch{unacked_message_q = Remaining}}. + +ack_record(DeliveryTag, ConsumerTag, + _MsgStruct = {_QName, QPid, MsgId, _Redelivered, _Msg}) -> + {DeliveryTag, ConsumerTag, {QPid, MsgId}}. + collect_acks(Q, 0, true) -> {Q, queue:new()}; collect_acks(Q, DeliveryTag, Multiple) -> @@ -871,7 +1124,8 @@ QTail, DeliveryTag, Multiple) end; {empty, _} -> - {ToAcc, PrefixAcc} + rabbit_misc:protocol_error( + precondition_failed, "unknown delivery tag ~w", [DeliveryTag]) end. add_tx_participants(MoreP, State = #ch{tx_participants = Participants}) -> @@ -882,7 +1136,7 @@ fold_per_queue( fun (QPid, MsgIds, L) -> ok = rabbit_amqqueue:ack(QPid, TxnKey, MsgIds, self()), - [QPid | L] + [{QPid, length(MsgIds)} | L] end, [], UAQ). make_tx_id() -> rabbit_guid:guid(). 
@@ -895,7 +1149,7 @@ internal_commit(State = #ch{transaction_id = TxnKey, tx_participants = Participants}) -> case rabbit_amqqueue:commit_all(sets:to_list(Participants), - TxnKey) of + TxnKey, self()) of ok -> ok = notify_limiter(State#ch.limiter_pid, State#ch.uncommitted_ack_q), new_tx(State); @@ -911,13 +1165,10 @@ [self(), queue:len(UAQ), queue:len(UAMQ)]), - case rabbit_amqqueue:rollback_all(sets:to_list(Participants), - TxnKey) of - ok -> NewUAMQ = queue:join(UAQ, UAMQ), - new_tx(State#ch{unacked_message_q = NewUAMQ}); - {error, Errors} -> rabbit_misc:protocol_error( - internal_error, "rollback failed: ~w", [Errors]) - end. + ok = rabbit_amqqueue:rollback_all(sets:to_list(Participants), + TxnKey, self()), + NewUAMQ = queue:join(UAQ, UAMQ), + new_tx(State#ch{unacked_message_q = NewUAMQ}). rollback_and_notify(State = #ch{transaction_id = none}) -> notify_queues(State); @@ -926,23 +1177,28 @@ fold_per_queue(F, Acc0, UAQ) -> D = rabbit_misc:queue_fold( - fun ({_DTag, _CTag, - {_QName, QPid, MsgId, _Redelivered, _Message}}, D) -> - %% dict:append would be simpler and avoid the - %% lists:reverse in handle_message({recover, true}, - %% ...). However, it is significantly slower when - %% going beyond a few thousand elements. - dict:update(QPid, - fun (MsgIds) -> [MsgId | MsgIds] end, - [MsgId], - D) + fun ({_DTag, _CTag, {QPid, MsgId}}, D) -> + %% dict:append would avoid the lists:reverse in + %% handle_message({recover, true}, ...). However, it + %% is significantly slower when going beyond a few + %% thousand elements. + rabbit_misc:dict_cons(QPid, MsgId, D) end, dict:new(), UAQ), dict:fold(fun (QPid, MsgIds, Acc) -> F(QPid, MsgIds, Acc) end, Acc0, D). +start_limiter(State = #ch{unacked_message_q = UAMQ, start_limiter_fun = SLF}) -> + {ok, LPid} = SLF(queue:len(UAMQ)), + ok = limit_queues(LPid, State), + LPid. + notify_queues(#ch{consumer_mapping = Consumers}) -> rabbit_amqqueue:notify_down_all(consumer_queues(Consumers), self()). +unlimit_queues(State) -> + ok = limit_queues(undefined, State), + undefined. + limit_queues(LPid, #ch{consumer_mapping = Consumers}) -> rabbit_amqqueue:limit_all(consumer_queues(Consumers), self(), LPid). @@ -972,54 +1228,100 @@ Count -> rabbit_limiter:ack(LimiterPid, Count) end. -is_message_persistent(#content{properties = #'P_basic'{ - delivery_mode = Mode}}) -> - case Mode of - 1 -> false; - 2 -> true; - undefined -> false; - Other -> rabbit_log:warning("Unknown delivery mode ~p - " - "treating as 1, non-persistent~n", - [Other]), - false +is_message_persistent(Content) -> + case rabbit_basic:is_message_persistent(Content) of + {invalid, Other} -> + rabbit_log:warning("Unknown delivery mode ~p - " + "treating as 1, non-persistent~n", + [Other]), + false; + IsPersistent when is_boolean(IsPersistent) -> + IsPersistent end. 
+process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> + ok = basic_return(Msg, State#ch.writer_pid, no_route), + record_confirm(MsgSeqNo, XName, State); +process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> + ok = basic_return(Msg, State#ch.writer_pid, no_consumers), + record_confirm(MsgSeqNo, XName, State); +process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> + record_confirm(MsgSeqNo, XName, State); +process_routing_result(routed, _, _, undefined, _, State) -> + State; +process_routing_result(routed, QPids, XName, MsgSeqNo, _, State) -> + #ch{unconfirmed = UC} = State, + [maybe_monitor(QPid) || QPid <- QPids], + UC1 = gb_trees:insert(MsgSeqNo, {XName, sets:from_list(QPids)}, UC), + State#ch{unconfirmed = UC1}. + lock_message(true, MsgStruct, State = #ch{unacked_message_q = UAMQ}) -> State#ch{unacked_message_q = queue:in(MsgStruct, UAMQ)}; lock_message(false, _MsgStruct, State) -> State. -internal_deliver(WriterPid, Notify, ConsumerTag, DeliveryTag, - {_QName, QPid, _MsgId, Redelivered, - #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, - content = Content}}) -> - M = #'basic.deliver'{consumer_tag = ConsumerTag, - delivery_tag = DeliveryTag, - redelivered = Redelivered, - exchange = ExchangeName#resource.name, - routing_key = RoutingKey}, - ok = case Notify of - true -> rabbit_writer:send_command_and_notify( - WriterPid, QPid, self(), M, Content); - false -> rabbit_writer:send_command(WriterPid, M, Content) - end. +send_nacks([], State) -> + State; +send_nacks(MXs, State) -> + MsgSeqNos = [ MsgSeqNo || {MsgSeqNo, _} <- MXs ], + coalesce_and_send(MsgSeqNos, + fun(MsgSeqNo, Multiple) -> + #'basic.nack'{delivery_tag = MsgSeqNo, + multiple = Multiple} + end, State). + +send_confirms(State = #ch{confirmed = C}) -> + C1 = lists:append(C), + MsgSeqNos = [ begin maybe_incr_stats([{ExchangeName, 1}], confirm, State), + MsgSeqNo + end || {MsgSeqNo, ExchangeName} <- C1 ], + send_confirms(MsgSeqNos, State #ch{confirmed = []}). +send_confirms([], State) -> + State; +send_confirms([MsgSeqNo], State = #ch{writer_pid = WriterPid}) -> + ok = rabbit_writer:send_command(WriterPid, + #'basic.ack'{delivery_tag = MsgSeqNo}), + State; +send_confirms(Cs, State) -> + coalesce_and_send(Cs, fun(MsgSeqNo, Multiple) -> + #'basic.ack'{delivery_tag = MsgSeqNo, + multiple = Multiple} + end, State). + +coalesce_and_send(MsgSeqNos, MkMsgFun, + State = #ch{writer_pid = WriterPid, unconfirmed = UC}) -> + SMsgSeqNos = lists:usort(MsgSeqNos), + CutOff = case gb_trees:is_empty(UC) of + true -> lists:last(SMsgSeqNos) + 1; + false -> {SeqNo, _XQ} = gb_trees:smallest(UC), SeqNo + end, + {Ms, Ss} = lists:splitwith(fun(X) -> X < CutOff end, SMsgSeqNos), + case Ms of + [] -> ok; + _ -> ok = rabbit_writer:send_command( + WriterPid, MkMsgFun(lists:last(Ms), true)) + end, + [ok = rabbit_writer:send_command( + WriterPid, MkMsgFun(SeqNo, false)) || SeqNo <- Ss], + State. -terminate(#ch{writer_pid = WriterPid, limiter_pid = LimiterPid}) -> +terminate(_State) -> pg_local:leave(rabbit_channels, self()), - rabbit_writer:shutdown(WriterPid), - rabbit_limiter:shutdown(LimiterPid). + rabbit_event:notify(channel_closed, [{pid, self()}]). infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. 
i(pid, _) -> self(); i(connection, #ch{reader_pid = ReaderPid}) -> ReaderPid; i(number, #ch{channel = Channel}) -> Channel; -i(user, #ch{username = Username}) -> Username; +i(user, #ch{user = User}) -> User#user.username; i(vhost, #ch{virtual_host = VHost}) -> VHost; i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; +i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); +i(messages_unconfirmed, #ch{unconfirmed = UC}) -> + gb_trees:size(UC); i(messages_unacknowledged, #ch{unacked_message_q = UAMQ, uncommitted_ack_q = UAQ}) -> queue:len(UAMQ) + queue:len(UAQ); @@ -1027,5 +1329,68 @@ queue:len(UAQ); i(prefetch_count, #ch{limiter_pid = LimiterPid}) -> rabbit_limiter:get_limit(LimiterPid); +i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) -> + rabbit_limiter:is_blocked(LimiterPid); i(Item, _) -> throw({bad_argument, Item}). + +maybe_incr_stats(QXIncs, Measure, #ch{stats_timer = StatsTimer}) -> + case rabbit_event:stats_level(StatsTimer) of + fine -> [incr_stats(QX, Inc, Measure) || {QX, Inc} <- QXIncs]; + _ -> ok + end. + +incr_stats({QPid, _} = QX, Inc, Measure) -> + maybe_monitor(QPid), + update_measures(queue_exchange_stats, QX, Inc, Measure); +incr_stats(QPid, Inc, Measure) when is_pid(QPid) -> + maybe_monitor(QPid), + update_measures(queue_stats, QPid, Inc, Measure); +incr_stats(X, Inc, Measure) -> + update_measures(exchange_stats, X, Inc, Measure). + +maybe_monitor(QPid) -> + case get({monitoring, QPid}) of + undefined -> erlang:monitor(process, QPid), + put({monitoring, QPid}, true); + _ -> ok + end. + +update_measures(Type, QX, Inc, Measure) -> + Measures = case get({Type, QX}) of + undefined -> []; + D -> D + end, + Cur = case orddict:find(Measure, Measures) of + error -> 0; + {ok, C} -> C + end, + put({Type, QX}, + orddict:store(Measure, Cur + Inc, Measures)). + +internal_emit_stats(State) -> + internal_emit_stats(State, []). + +internal_emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) -> + CoarseStats = infos(?STATISTICS_KEYS, State), + case rabbit_event:stats_level(StatsTimer) of + coarse -> + rabbit_event:notify(channel_stats, Extra ++ CoarseStats); + fine -> + FineStats = + [{channel_queue_stats, + [{QPid, Stats} || {{queue_stats, QPid}, Stats} <- get()]}, + {channel_exchange_stats, + [{X, Stats} || {{exchange_stats, X}, Stats} <- get()]}, + {channel_queue_exchange_stats, + [{QX, Stats} || + {{queue_exchange_stats, QX}, Stats} <- get()]}], + rabbit_event:notify(channel_stats, + Extra ++ CoarseStats ++ FineStats) + end. + +erase_queue_stats(QPid) -> + erase({monitoring, QPid}), + erase({queue_stats, QPid}), + [erase({queue_exchange_stats, QX}) || + {{queue_exchange_stats, QX = {QPid0, _}}, _} <- get(), QPid =:= QPid0]. diff -Nru rabbitmq-server-1.7.2/src/rabbit_channel_sup.erl rabbitmq-server-2.3.1/src/rabbit_channel_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_channel_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_channel_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,88 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_channel_sup). + +-behaviour(supervisor2). + +-export([start_link/1]). + +-export([init/1]). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([start_link_args/0]). + +-type(start_link_args() :: + {'tcp', rabbit_types:protocol(), rabbit_net:socket(), + rabbit_channel:channel_number(), non_neg_integer(), pid(), + rabbit_types:user(), rabbit_types:vhost(), pid()} | + {'direct', rabbit_channel:channel_number(), pid(), rabbit_types:user(), + rabbit_types:vhost(), pid()}). + +-spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link({tcp, Protocol, Sock, Channel, FrameMax, ReaderPid, User, VHost, + Collector}) -> + {ok, SupPid} = supervisor2:start_link(?MODULE, []), + {ok, WriterPid} = + supervisor2:start_child( + SupPid, + {writer, {rabbit_writer, start_link, + [Sock, Channel, FrameMax, Protocol, ReaderPid]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_writer]}), + {ok, ChannelPid} = + supervisor2:start_child( + SupPid, + {channel, {rabbit_channel, start_link, + [Channel, ReaderPid, WriterPid, User, VHost, + Collector, start_limiter_fun(SupPid)]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), + {ok, AState} = rabbit_command_assembler:init(Protocol), + {ok, SupPid, {ChannelPid, AState}}; +start_link({direct, Channel, ClientChannelPid, User, VHost, Collector}) -> + {ok, SupPid} = supervisor2:start_link(?MODULE, []), + {ok, ChannelPid} = + supervisor2:start_child( + SupPid, + {channel, {rabbit_channel, start_link, + [Channel, ClientChannelPid, ClientChannelPid, + User, VHost, Collector, start_limiter_fun(SupPid)]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), + {ok, SupPid, {ChannelPid, none}}. + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, {{one_for_all, 0, 1}, []}}. + +start_limiter_fun(SupPid) -> + fun (UnackedCount) -> + Me = self(), + {ok, _Pid} = + supervisor2:start_child( + SupPid, + {limiter, {rabbit_limiter, start_link, [Me, UnackedCount]}, + transient, ?MAX_WAIT, worker, [rabbit_limiter]}) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_channel_sup_sup.erl rabbitmq-server-2.3.1/src/rabbit_channel_sup_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_channel_sup_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_channel_sup_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,48 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_channel_sup_sup). 
+ +-behaviour(supervisor2). + +-export([start_link/0, start_channel/2]). + +-export([init/1]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). +-spec(start_channel/2 :: (pid(), rabbit_channel_sup:start_link_args()) -> + {'ok', pid(), {pid(), any()}}). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link() -> + supervisor2:start_link(?MODULE, []). + +start_channel(Pid, Args) -> + supervisor2:start_child(Pid, [Args]). + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, {{simple_one_for_one_terminate, 0, 1}, + [{channel_sup, {rabbit_channel_sup, start_link, []}, + temporary, infinity, supervisor, [rabbit_channel_sup]}]}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_client_sup.erl rabbitmq-server-2.3.1/src/rabbit_client_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_client_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_client_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,48 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_client_sup). + +-behaviour(supervisor2). + +-export([start_link/1, start_link/2]). + +-export([init/1]). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/1 :: (mfa()) -> + rabbit_types:ok_pid_or_error()). +-spec(start_link/2 :: ({'local', atom()}, mfa()) -> + rabbit_types:ok_pid_or_error()). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link(Callback) -> + supervisor2:start_link(?MODULE, Callback). + +start_link(SupName, Callback) -> + supervisor2:start_link(SupName, ?MODULE, Callback). + +init({M,F,A}) -> + {ok, {{simple_one_for_one_terminate, 0, 1}, + [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_command_assembler.erl rabbitmq-server-2.3.1/src/rabbit_command_assembler.erl --- rabbitmq-server-1.7.2/src/rabbit_command_assembler.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_command_assembler.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,133 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. 
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_command_assembler). +-include("rabbit_framing.hrl"). +-include("rabbit.hrl"). + +-export([analyze_frame/3, init/1, process/2]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY | + ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY | + ?FRAME_TRACE | ?FRAME_HEARTBEAT). +-type(protocol() :: rabbit_framing:protocol()). +-type(method() :: rabbit_framing:amqp_method_record()). +-type(class_id() :: rabbit_framing:amqp_class_id()). +-type(weight() :: non_neg_integer()). +-type(body_size() :: non_neg_integer()). +-type(content() :: rabbit_types:undecoded_content()). + +-type(frame() :: + {'method', rabbit_framing:amqp_method_name(), binary()} | + {'content_header', class_id(), weight(), body_size(), binary()} | + {'content_body', binary()}). + +-type(state() :: + {'method', protocol()} | + {'content_header', method(), class_id(), protocol()} | + {'content_body', method(), body_size(), class_id(), protocol()}). + +-spec(analyze_frame/3 :: (frame_type(), binary(), protocol()) -> + frame() | 'heartbeat' | 'error'). + +-spec(init/1 :: (protocol()) -> {ok, state()}). +-spec(process/2 :: (frame(), state()) -> + {ok, state()} | + {ok, method(), state()} | + {ok, method(), content(), state()} | + {error, rabbit_types:amqp_error()}). + +-endif. + +%%-------------------------------------------------------------------- + +analyze_frame(?FRAME_METHOD, + <<ClassId:16, MethodId:16, MethodFields/binary>>, + Protocol) -> + MethodName = Protocol:lookup_method_name({ClassId, MethodId}), + {method, MethodName, MethodFields}; +analyze_frame(?FRAME_HEADER, + <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>, + _Protocol) -> + {content_header, ClassId, Weight, BodySize, Properties}; +analyze_frame(?FRAME_BODY, Body, _Protocol) -> + {content_body, Body}; +analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) -> + heartbeat; +analyze_frame(_Type, _Body, _Protocol) -> + error. + +init(Protocol) -> {ok, {method, Protocol}}.
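A hedged sketch (not part of the patch) of how a reader loop might drive this assembler: each raw frame is classified by analyze_frame/3, and the resulting typed frame is folded through process/2 until a complete command (a method, or a method plus content) emerges. The frame-list shape and HandleFun are illustrative only.

assemble(RawFrames, Protocol, HandleFun) ->
    {ok, AState0} = rabbit_command_assembler:init(Protocol),
    lists:foldl(
      fun ({Type, Payload}, AState) ->
              case rabbit_command_assembler:analyze_frame(Type, Payload,
                                                          Protocol) of
                  heartbeat -> AState;
                  error     -> exit({unknown_frame_type, Type});
                  Frame     ->
                      case rabbit_command_assembler:process(Frame, AState) of
                          {ok, AState1}           -> AState1;
                          {ok, Method, AState1}   -> HandleFun(Method, none),
                                                     AState1;
                          {ok, Method, Content, AState1} ->
                                                     HandleFun(Method, Content),
                                                     AState1;
                          {error, Reason}         -> exit(Reason)
                      end
              end
      end, AState0, RawFrames).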
+ +process({method, MethodName, FieldsBin}, {method, Protocol}) -> + try + Method = Protocol:decode_method_fields(MethodName, FieldsBin), + case Protocol:method_has_content(MethodName) of + true -> {ClassId, _MethodId} = Protocol:method_id(MethodName), + {ok, {content_header, Method, ClassId, Protocol}}; + false -> {ok, Method, {method, Protocol}} + end + catch exit:#amqp_error{} = Reason -> {error, Reason} + end; +process(_Frame, {method, _Protocol}) -> + unexpected_frame("expected method frame, " + "got non method frame instead", [], none); +process({content_header, ClassId, 0, 0, PropertiesBin}, + {content_header, Method, ClassId, Protocol}) -> + Content = empty_content(ClassId, PropertiesBin, Protocol), + {ok, Method, Content, {method, Protocol}}; +process({content_header, ClassId, 0, BodySize, PropertiesBin}, + {content_header, Method, ClassId, Protocol}) -> + Content = empty_content(ClassId, PropertiesBin, Protocol), + {ok, {content_body, Method, BodySize, Content, Protocol}}; +process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin}, + {content_header, Method, ClassId, _Protocol}) -> + unexpected_frame("expected content header for class ~w, " + "got one for class ~w instead", + [ClassId, HeaderClassId], Method); +process(_Frame, {content_header, Method, ClassId, _Protocol}) -> + unexpected_frame("expected content header for class ~w, " + "got non content header frame instead", [ClassId], Method); +process({content_body, FragmentBin}, + {content_body, Method, RemainingSize, + Content = #content{payload_fragments_rev = Fragments}, Protocol}) -> + NewContent = Content#content{ + payload_fragments_rev = [FragmentBin | Fragments]}, + case RemainingSize - size(FragmentBin) of + 0 -> {ok, Method, NewContent, {method, Protocol}}; + Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}} + end; +process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) -> + unexpected_frame("expected content body, " + "got non content body frame instead", [], Method). + +%%-------------------------------------------------------------------- + +empty_content(ClassId, PropertiesBin, Protocol) -> + #content{class_id = ClassId, + properties = none, + properties_bin = PropertiesBin, + protocol = Protocol, + payload_fragments_rev = []}. + +unexpected_frame(Format, Params, Method) when is_atom(Method) -> + {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)}; +unexpected_frame(Format, Params, Method) -> + unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)). diff -Nru rabbitmq-server-1.7.2/src/rabbit_connection_sup.erl rabbitmq-server-2.3.1/src/rabbit_connection_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_connection_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_connection_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,65 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. 
+%% + +-module(rabbit_connection_sup). + +-behaviour(supervisor2). + +-export([start_link/0, reader/1]). + +-export([init/1]). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> {'ok', pid(), pid()}). +-spec(reader/1 :: (pid()) -> pid()). + +-endif. + +%%-------------------------------------------------------------------------- + +start_link() -> + {ok, SupPid} = supervisor2:start_link(?MODULE, []), + {ok, Collector} = + supervisor2:start_child( + SupPid, + {collector, {rabbit_queue_collector, start_link, []}, + intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}), + {ok, ChannelSupSupPid} = + supervisor2:start_child( + SupPid, + {channel_sup_sup, {rabbit_channel_sup_sup, start_link, []}, + intrinsic, infinity, supervisor, [rabbit_channel_sup_sup]}), + {ok, ReaderPid} = + supervisor2:start_child( + SupPid, + {reader, {rabbit_reader, start_link, + [ChannelSupSupPid, Collector, + rabbit_heartbeat:start_heartbeat_fun(SupPid)]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}), + {ok, SupPid, ReaderPid}. + +reader(Pid) -> + hd(supervisor2:find_child(Pid, reader)). + +%%-------------------------------------------------------------------------- + +init([]) -> + {ok, {{one_for_all, 0, 1}, []}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_control.erl rabbitmq-server-2.3.1/src/rabbit_control.erl --- rabbitmq-server-1.7.2/src/rabbit_control.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_control.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,51 +1,42 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. 
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_control). -include("rabbit.hrl"). --export([start/0, stop/0, action/4]). - --record(params, {quiet, node, command, args}). +-export([start/0, stop/0, action/5, diagnostics/1]). -define(RPC_TIMEOUT, infinity). +-define(QUIET_OPT, "-q"). +-define(NODE_OPT, "-n"). +-define(VHOST_OPT, "-p"). + %%---------------------------------------------------------------------------- -ifdef(use_specs). -spec(start/0 :: () -> no_return()). -spec(stop/0 :: () -> 'ok'). --spec(action/4 :: (atom(), erlang_node(), [string()], - fun ((string(), [any()]) -> 'ok')) -> 'ok'). +-spec(action/5 :: + (atom(), node(), [string()], [{string(), any()}], + fun ((string(), [any()]) -> 'ok')) + -> 'ok'). +-spec(diagnostics/1 :: (node()) -> [{string(), [any()]}]). +-spec(usage/0 :: () -> no_return()). -endif. @@ -54,191 +45,126 @@ start() -> {ok, [[NodeStr|_]|_]} = init:get_argument(nodename), FullCommand = init:get_plain_arguments(), - #params{quiet = Quiet, node = Node, command = Command, args = Args} = - parse_args(FullCommand, #params{quiet = false, - node = rabbit_misc:makenode(NodeStr)}), + case FullCommand of + [] -> usage(); + _ -> ok + end, + {[Command0 | Args], Opts} = + rabbit_misc:get_options( + [{flag, ?QUIET_OPT}, {option, ?NODE_OPT, NodeStr}, + {option, ?VHOST_OPT, "/"}], + FullCommand), + Opts1 = lists:map(fun({K, V}) -> + case K of + ?NODE_OPT -> {?NODE_OPT, rabbit_misc:makenode(V)}; + _ -> {K, V} + end + end, Opts), + Command = list_to_atom(Command0), + Quiet = proplists:get_bool(?QUIET_OPT, Opts1), + Node = proplists:get_value(?NODE_OPT, Opts1), Inform = case Quiet of - true -> fun(_Format, _Args1) -> ok end; - false -> fun(Format, Args1) -> + true -> fun (_Format, _Args1) -> ok end; + false -> fun (Format, Args1) -> io:format(Format ++ " ...~n", Args1) - end + end end, %% The reason we don't use a try/catch here is that rpc:call turns %% thrown errors into normal return values - case catch action(Command, Node, Args, Inform) of + case catch action(Command, Node, Args, Opts, Inform) of ok -> case Quiet of true -> ok; false -> io:format("...done.~n") end, - halt(); + quit(0); {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> - error("invalid command '~s'", - [lists:flatten( - rabbit_misc:intersperse( - " ", [atom_to_list(Command) | Args]))]), + print_error("invalid command '~s'", + [string:join([atom_to_list(Command) | Args], " ")]), usage(); {error, Reason} -> - error("~p", [Reason]), - halt(2); + print_error("~p", [Reason]), + quit(2); {badrpc, {'EXIT', Reason}} -> - error("~p", [Reason]), - halt(2); + print_error("~p", [Reason]), + quit(2); {badrpc, Reason} -> - error("unable to connect to node ~w: ~w", [Node, Reason]), + print_error("unable to connect to node ~w: ~w", [Node, Reason]), print_badrpc_diagnostics(Node), - halt(2); + quit(2); Other -> - error("~p", [Other]), - halt(2) + print_error("~p", [Other]), + quit(2) end. fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args). -error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args). +print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args). 
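A worked illustration (not part of the patch) of the reworked option handling in start/0: the vhost flag is no longer consumed clause-by-clause via parse_vhost_flag/1 but arrives as an options proplist in the new action/5. The node name below is illustrative.

%% Assuming a command line such as
%%
%%   rabbitmqctl -p /test set_permissions guest ".*" ".*" ".*"
%%
%% start/0 ends up making, roughly, the call
%%
%%   action(set_permissions, 'rabbit@somehost',
%%          ["guest", ".*", ".*", ".*"],
%%          [..., {"-p", "/test"}],
%%          Inform)
%%
%% and the set_permissions clause picks out the vhost with
%% proplists:get_value("-p", Opts) instead of parsing it from Args.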
print_badrpc_diagnostics(Node) -> - fmt_stderr("diagnostics:", []), - {_NodeName, NodeHost} = rabbit_misc:nodeparts(Node), - case net_adm:names(NodeHost) of - {error, EpmdReason} -> - fmt_stderr("- unable to connect to epmd on ~s: ~w", - [NodeHost, EpmdReason]); - {ok, NamePorts} -> - fmt_stderr("- nodes and their ports on ~s: ~p", - [NodeHost, [{list_to_atom(Name), Port} || - {Name, Port} <- NamePorts]]) - end, - fmt_stderr("- current node: ~w", [node()]), - case init:get_argument(home) of - {ok, [[Home]]} -> fmt_stderr("- current node home dir: ~s", [Home]); - Other -> fmt_stderr("- no current node home dir: ~p", [Other]) - end, - fmt_stderr("- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]), - ok. + [fmt_stderr(Fmt, Args) || {Fmt, Args} <- diagnostics(Node)]. -parse_args(["-n", NodeS | Args], Params) -> - parse_args(Args, Params#params{node = rabbit_misc:makenode(NodeS)}); -parse_args(["-q" | Args], Params) -> - parse_args(Args, Params#params{quiet = true}); -parse_args([Command | Args], Params) -> - Params#params{command = list_to_atom(Command), args = Args}; -parse_args([], _) -> - usage(). +diagnostics(Node) -> + {_NodeName, NodeHost} = rabbit_misc:nodeparts(Node), + [ + {"diagnostics:", []}, + case net_adm:names(NodeHost) of + {error, EpmdReason} -> + {"- unable to connect to epmd on ~s: ~w", + [NodeHost, EpmdReason]}; + {ok, NamePorts} -> + {"- nodes and their ports on ~s: ~p", + [NodeHost, [{list_to_atom(Name), Port} || + {Name, Port} <- NamePorts]]} + end, + {"- current node: ~w", [node()]}, + case init:get_argument(home) of + {ok, [[Home]]} -> {"- current node home dir: ~s", [Home]}; + Other -> {"- no current node home dir: ~p", [Other]} + end, + {"- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]} + ]. stop() -> ok. usage() -> - io:format("Usage: rabbitmqctl [-q] [-n ] [ ...] + io:format("~s", [rabbit_ctl_usage:usage()]), + quit(1). -Available commands: - - stop - stops the RabbitMQ application and halts the node - stop_app - stops the RabbitMQ application, leaving the node running - start_app - starts the RabbitMQ application on an already-running node - reset - resets node to default configuration, deleting all data - force_reset - cluster ... - status - rotate_logs [Suffix] - close_connection - - add_user - delete_user - change_password - list_users - - add_vhost - delete_vhost - list_vhosts - - set_permissions [-p ] - clear_permissions [-p ] - list_permissions [-p ] - list_user_permissions - - list_queues [-p ] [ ...] - list_exchanges [-p ] [ ...] - list_bindings [-p ] - list_connections [ ...] - list_channels [ ...] - list_consumers [-p ] - -Quiet output mode is selected with the \"-q\" flag. Informational -messages are suppressed when quiet mode is in effect. - - should be the name of the master node of the RabbitMQ -cluster. It defaults to the node named \"rabbit\" on the local -host. On a host named \"server.example.com\", the master node will -usually be rabbit@server (unless RABBITMQ_NODENAME has been set to -some non-default value at broker startup time). The output of hostname --s is usually the correct suffix to use after the \"@\" sign. - -The list_queues, list_exchanges and list_bindings commands accept an -optional virtual host parameter for which to display results. The -default value is \"/\". 
- - must be a member of the list [name, durable, -auto_delete, arguments, pid, owner_pid, exclusive_consumer_pid, -exclusive_consumer_tag, messages_ready, messages_unacknowledged, -messages_uncommitted, messages, acks_uncommitted, consumers, -transactions, memory]. The default is to display name and (number of) -messages. - - must be a member of the list [name, type, durable, -auto_delete, arguments]. The default is to display name and type. - -The output format for \"list_bindings\" is a list of rows containing -exchange name, queue name, routing key and arguments, in that order. - - must be a member of the list [pid, address, port, -peer_address, peer_port, state, channels, user, vhost, timeout, -frame_max, client_properties, recv_oct, recv_cnt, send_oct, send_cnt, -send_pend]. The default is to display user, peer_address, peer_port -and state. - - must be a member of the list [pid, connection, -number, user, vhost, transactional, consumer_count, -messages_unacknowledged, acks_uncommitted, prefetch_count]. The -default is to display pid, user, transactional, consumer_count, -messages_unacknowledged. - -The output format for \"list_consumers\" is a list of rows containing, -in order, the queue name, channel process id, consumer tag, and a -boolean indicating whether acknowledgements are expected from the -consumer. - -"), - halt(1). - -action(stop, Node, [], Inform) -> +action(stop, Node, [], _Opts, Inform) -> Inform("Stopping and halting node ~p", [Node]), call(Node, {rabbit, stop_and_halt, []}); -action(stop_app, Node, [], Inform) -> +action(stop_app, Node, [], _Opts, Inform) -> Inform("Stopping node ~p", [Node]), call(Node, {rabbit, stop, []}); -action(start_app, Node, [], Inform) -> +action(start_app, Node, [], _Opts, Inform) -> Inform("Starting node ~p", [Node]), call(Node, {rabbit, start, []}); -action(reset, Node, [], Inform) -> +action(reset, Node, [], _Opts, Inform) -> Inform("Resetting node ~p", [Node]), call(Node, {rabbit_mnesia, reset, []}); -action(force_reset, Node, [], Inform) -> +action(force_reset, Node, [], _Opts, Inform) -> Inform("Forcefully resetting node ~p", [Node]), call(Node, {rabbit_mnesia, force_reset, []}); -action(cluster, Node, ClusterNodeSs, Inform) -> +action(cluster, Node, ClusterNodeSs, _Opts, Inform) -> ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), Inform("Clustering node ~p with ~p", [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, cluster, [ClusterNodes]); -action(status, Node, [], Inform) -> +action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> + ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), + Inform("Forcefully clustering node ~p with ~p (ignoring offline nodes)", + [Node, ClusterNodes]), + rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]); + +action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), case call(Node, {rabbit, status, []}) of {badrpc, _} = Res -> Res; @@ -246,128 +172,132 @@ ok end; -action(rotate_logs, Node, [], Inform) -> +action(rotate_logs, Node, [], _Opts, Inform) -> Inform("Reopening logs for node ~p", [Node]), call(Node, {rabbit, rotate_logs, [""]}); -action(rotate_logs, Node, Args = [Suffix], Inform) -> +action(rotate_logs, Node, Args = [Suffix], _Opts, Inform) -> Inform("Rotating logs to files with suffix ~p", [Suffix]), call(Node, {rabbit, rotate_logs, Args}); -action(close_connection, Node, [PidStr, Explanation], Inform) -> +action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) -> Inform("Closing connection ~s", [PidStr]), 
rpc_call(Node, rabbit_networking, close_connection, [rabbit_misc:string_to_pid(PidStr), Explanation]); -action(add_user, Node, Args = [Username, _Password], Inform) -> +action(add_user, Node, Args = [Username, _Password], _Opts, Inform) -> Inform("Creating user ~p", [Username]), - call(Node, {rabbit_access_control, add_user, Args}); + call(Node, {rabbit_auth_backend_internal, add_user, Args}); -action(delete_user, Node, Args = [_Username], Inform) -> +action(delete_user, Node, Args = [_Username], _Opts, Inform) -> Inform("Deleting user ~p", Args), - call(Node, {rabbit_access_control, delete_user, Args}); + call(Node, {rabbit_auth_backend_internal, delete_user, Args}); -action(change_password, Node, Args = [Username, _Newpassword], Inform) -> +action(change_password, Node, Args = [Username, _Newpassword], _Opts, Inform) -> Inform("Changing password for user ~p", [Username]), - call(Node, {rabbit_access_control, change_password, Args}); + call(Node, {rabbit_auth_backend_internal, change_password, Args}); + +action(clear_password, Node, Args = [Username], _Opts, Inform) -> + Inform("Clearing password for user ~p", [Username]), + call(Node, {rabbit_auth_backend_internal, clear_password, Args}); + +action(set_admin, Node, [Username], _Opts, Inform) -> + Inform("Setting administrative status for user ~p", [Username]), + call(Node, {rabbit_auth_backend_internal, set_admin, [Username]}); -action(list_users, Node, [], Inform) -> +action(clear_admin, Node, [Username], _Opts, Inform) -> + Inform("Clearing administrative status for user ~p", [Username]), + call(Node, {rabbit_auth_backend_internal, clear_admin, [Username]}); + +action(list_users, Node, [], _Opts, Inform) -> Inform("Listing users", []), - display_list(call(Node, {rabbit_access_control, list_users, []})); + display_list(call(Node, {rabbit_auth_backend_internal, list_users, []})); -action(add_vhost, Node, Args = [_VHostPath], Inform) -> +action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) -> Inform("Creating vhost ~p", Args), - call(Node, {rabbit_access_control, add_vhost, Args}); + call(Node, {rabbit_vhost, add, Args}); -action(delete_vhost, Node, Args = [_VHostPath], Inform) -> +action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) -> Inform("Deleting vhost ~p", Args), - call(Node, {rabbit_access_control, delete_vhost, Args}); + call(Node, {rabbit_vhost, delete, Args}); -action(list_vhosts, Node, [], Inform) -> +action(list_vhosts, Node, [], _Opts, Inform) -> Inform("Listing vhosts", []), - display_list(call(Node, {rabbit_access_control, list_vhosts, []})); + display_list(call(Node, {rabbit_vhost, list, []})); -action(list_user_permissions, Node, Args = [_Username], Inform) -> +action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) -> Inform("Listing permissions for user ~p", Args), - display_list(call(Node, {rabbit_access_control, list_user_permissions, - Args})); + display_list(call(Node, {rabbit_auth_backend_internal, + list_user_permissions, Args})); -action(list_queues, Node, Args, Inform) -> +action(list_queues, Node, Args, Opts, Inform) -> Inform("Listing queues", []), - {VHostArg, RemainingArgs} = parse_vhost_flag_bin(Args), - ArgAtoms = default_if_empty(RemainingArgs, [name, messages]), + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + ArgAtoms = default_if_empty(Args, [name, messages]), display_info_list(rpc_call(Node, rabbit_amqqueue, info_all, [VHostArg, ArgAtoms]), ArgAtoms); -action(list_exchanges, Node, Args, Inform) -> +action(list_exchanges, Node, Args, Opts, Inform) 
-> Inform("Listing exchanges", []), - {VHostArg, RemainingArgs} = parse_vhost_flag_bin(Args), - ArgAtoms = default_if_empty(RemainingArgs, [name, type]), + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + ArgAtoms = default_if_empty(Args, [name, type]), display_info_list(rpc_call(Node, rabbit_exchange, info_all, [VHostArg, ArgAtoms]), ArgAtoms); -action(list_bindings, Node, Args, Inform) -> +action(list_bindings, Node, Args, Opts, Inform) -> Inform("Listing bindings", []), - {VHostArg, _} = parse_vhost_flag_bin(Args), - InfoKeys = [exchange_name, queue_name, routing_key, args], - display_info_list( - [lists:zip(InfoKeys, tuple_to_list(X)) || - X <- rpc_call(Node, rabbit_exchange, list_bindings, [VHostArg])], - InfoKeys); + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + ArgAtoms = default_if_empty(Args, [source_name, source_kind, + destination_name, destination_kind, + routing_key, arguments]), + display_info_list(rpc_call(Node, rabbit_binding, info_all, + [VHostArg, ArgAtoms]), + ArgAtoms); -action(list_connections, Node, Args, Inform) -> +action(list_connections, Node, Args, _Opts, Inform) -> Inform("Listing connections", []), ArgAtoms = default_if_empty(Args, [user, peer_address, peer_port, state]), display_info_list(rpc_call(Node, rabbit_networking, connection_info_all, [ArgAtoms]), ArgAtoms); -action(list_channels, Node, Args, Inform) -> +action(list_channels, Node, Args, _Opts, Inform) -> Inform("Listing channels", []), ArgAtoms = default_if_empty(Args, [pid, user, transactional, consumer_count, messages_unacknowledged]), display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]), ArgAtoms); -action(list_consumers, Node, Args, Inform) -> +action(list_consumers, Node, _Args, Opts, Inform) -> Inform("Listing consumers", []), - {VHostArg, _} = parse_vhost_flag_bin(Args), + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), InfoKeys = [queue_name, channel_pid, consumer_tag, ack_required], - display_info_list( - [lists:zip(InfoKeys, tuple_to_list(X)) || - X <- rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg])], - InfoKeys); - -action(Command, Node, Args, Inform) -> - {VHost, RemainingArgs} = parse_vhost_flag(Args), - action(Command, Node, VHost, RemainingArgs, Inform). 
+ case rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]) of + L when is_list(L) -> display_info_list( + [lists:zip(InfoKeys, tuple_to_list(X)) || + X <- L], + InfoKeys); + Other -> Other + end; -action(set_permissions, Node, VHost, [Username, CPerm, WPerm, RPerm], Inform) -> +action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> + VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Setting permissions for user ~p in vhost ~p", [Username, VHost]), - call(Node, {rabbit_access_control, set_permissions, + call(Node, {rabbit_auth_backend_internal, set_permissions, [Username, VHost, CPerm, WPerm, RPerm]}); -action(clear_permissions, Node, VHost, [Username], Inform) -> +action(clear_permissions, Node, [Username], Opts, Inform) -> + VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Clearing permissions for user ~p in vhost ~p", [Username, VHost]), - call(Node, {rabbit_access_control, clear_permissions, [Username, VHost]}); + call(Node, {rabbit_auth_backend_internal, clear_permissions, + [Username, VHost]}); -action(list_permissions, Node, VHost, [], Inform) -> +action(list_permissions, Node, [], Opts, Inform) -> + VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), - display_list(call(Node, {rabbit_access_control, list_vhost_permissions, - [VHost]})). - -parse_vhost_flag(Args) when is_list(Args) -> - case Args of - ["-p", VHost | RemainingArgs] -> - {VHost, RemainingArgs}; - RemainingArgs -> - {"/", RemainingArgs} - end. - -parse_vhost_flag_bin(Args) -> - {VHost, RemainingArgs} = parse_vhost_flag(Args), - {list_to_binary(VHost), RemainingArgs}. + display_list(call(Node, {rabbit_auth_backend_internal, + list_vhost_permissions, [VHost]})). default_if_empty(List, Default) when is_list(List) -> if List == [] -> @@ -377,36 +307,43 @@ end. display_info_list(Results, InfoItemKeys) when is_list(Results) -> - lists:foreach(fun (Result) -> display_row([format_info_item(X, Result) || - X <- InfoItemKeys]) - end, Results), + lists:foreach( + fun (Result) -> display_row( + [format_info_item(proplists:get_value(X, Result)) || + X <- InfoItemKeys]) + end, Results), ok; display_info_list(Other, _) -> Other. display_row(Row) -> - io:fwrite(lists:flatten(rabbit_misc:intersperse("\t", Row))), + io:fwrite(string:join(Row, "\t")), io:nl(). -format_info_item(Key, Items) -> - case proplists:get_value(Key, Items) of - #resource{name = Name} -> - escape(Name); - Value when Key =:= address; Key =:= peer_address andalso - is_tuple(Value) -> - inet_parse:ntoa(Value); - Value when is_pid(Value) -> - rabbit_misc:pid_to_string(Value); - Value when is_binary(Value) -> - escape(Value); - Value when is_atom(Value) -> - escape(atom_to_list(Value)); - Value = [{TableEntryKey, TableEntryType, _TableEntryValue} | _] - when is_binary(TableEntryKey) andalso is_atom(TableEntryType) -> - io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]); - Value -> - io_lib:format("~w", [Value]) - end. +-define(IS_U8(X), (X >= 0 andalso X =< 255)). +-define(IS_U16(X), (X >= 0 andalso X =< 65535)). 
+ +format_info_item(#resource{name = Name}) -> + escape(Name); +format_info_item({N1, N2, N3, N4} = Value) when + ?IS_U8(N1), ?IS_U8(N2), ?IS_U8(N3), ?IS_U8(N4) -> + rabbit_misc:ntoa(Value); +format_info_item({K1, K2, K3, K4, K5, K6, K7, K8} = Value) when + ?IS_U16(K1), ?IS_U16(K2), ?IS_U16(K3), ?IS_U16(K4), + ?IS_U16(K5), ?IS_U16(K6), ?IS_U16(K7), ?IS_U16(K8) -> + rabbit_misc:ntoa(Value); +format_info_item(Value) when is_pid(Value) -> + rabbit_misc:pid_to_string(Value); +format_info_item(Value) when is_binary(Value) -> + escape(Value); +format_info_item(Value) when is_atom(Value) -> + escape(atom_to_list(Value)); +format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] = + Value) when is_binary(TableEntryKey) andalso + is_atom(TableEntryType) -> + io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]); +format_info_item(Value) -> + io_lib:format("~w", [Value]). display_list(L) when is_list(L) -> lists:foreach(fun (I) when is_binary(I) -> @@ -429,6 +366,8 @@ %% characters. We don't escape characters above 127, since they may %% form part of UTF-8 strings. +escape(Atom) when is_atom(Atom) -> + escape(atom_to_list(Atom)); escape(Bin) when is_binary(Bin) -> escape(binary_to_list(Bin)); escape(L) when is_list(L) -> @@ -454,3 +393,12 @@ array -> [prettify_typed_amqp_value(T, V) || {T, V} <- Value]; _ -> Value end. + +% the slower shutdown on windows required to flush stdout +quit(Status) -> + case os:type() of + {unix, _} -> + halt(Status); + {win32, _} -> + init:stop(Status) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_dialyzer.erl rabbitmq-server-2.3.1/src/rabbit_dialyzer.erl --- rabbitmq-server-1.7.2/src/rabbit_dialyzer.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_dialyzer.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_dialyzer). --include("rabbit.hrl"). - --export([create_basic_plt/1, add_to_plt/2, dialyze_files/2, halt_with_code/1]). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --spec(create_basic_plt/1 :: (string()) -> 'ok'). --spec(add_to_plt/2 :: (string(), string()) -> 'ok'). 
--spec(dialyze_files/2 :: (string(), string()) -> 'ok'). --spec(halt_with_code/1 :: (atom()) -> no_return()). - --endif. - -%%---------------------------------------------------------------------------- - -create_basic_plt(BasicPltPath) -> - OptsRecord = dialyzer_options:build( - [{analysis_type, plt_build}, - {output_plt, BasicPltPath}, - {files_rec, otp_apps_dependencies_paths()}]), - dialyzer_cl:start(OptsRecord), - ok. - -add_to_plt(PltPath, FilesString) -> - {ok, Files} = regexp:split(FilesString, " "), - DialyzerWarnings = dialyzer:run([{analysis_type, plt_add}, - {init_plt, PltPath}, - {output_plt, PltPath}, - {files, Files}]), - print_warnings(DialyzerWarnings), - ok. - -dialyze_files(PltPath, ModifiedFiles) -> - {ok, Files} = regexp:split(ModifiedFiles, " "), - DialyzerWarnings = dialyzer:run([{init_plt, PltPath}, - {files, Files}]), - case DialyzerWarnings of - [] -> io:format("~nOk~n"), - ok; - _ -> io:format("~nFAILED with the following warnings:~n"), - print_warnings(DialyzerWarnings), - fail - end. - -print_warnings(Warnings) -> - [io:format("~s", [dialyzer:format_warning(W)]) || W <- Warnings], - io:format("~n"), - ok. - -otp_apps_dependencies_paths() -> - [code:lib_dir(App, ebin) || - App <- [kernel, stdlib, sasl, mnesia, os_mon, ssl, eunit, tools]]. - -halt_with_code(ok) -> - halt(); -halt_with_code(fail) -> - halt(1). diff -Nru rabbitmq-server-1.7.2/src/rabbit_direct.erl rabbitmq-server-2.3.1/src/rabbit_direct.erl --- rabbitmq-server-1.7.2/src/rabbit_direct.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_direct.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,75 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_direct). + +-export([boot/0, connect/3, start_channel/5]). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(boot/0 :: () -> 'ok'). +-spec(connect/3 :: (binary(), binary(), binary()) -> + {'ok', {rabbit_types:user(), + rabbit_framing:amqp_table()}}). +-spec(start_channel/5 :: (rabbit_channel:channel_number(), pid(), + rabbit_types:user(), rabbit_types:vhost(), pid()) -> + {'ok', pid()}). + +-endif. + +%%---------------------------------------------------------------------------- + +boot() -> + {ok, _} = + supervisor2:start_child( + rabbit_sup, + {rabbit_direct_client_sup, + {rabbit_client_sup, start_link, + [{local, rabbit_direct_client_sup}, + {rabbit_channel_sup, start_link, []}]}, + transient, infinity, supervisor, [rabbit_client_sup]}), + ok. 
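A minimal sketch (not part of the patch) of how an in-VM client could use the connect/3 and start_channel/5 functions specified above; the credentials, channel number, and Collector (assumed to be a rabbit_queue_collector pid owned by the caller) are illustrative.

open_direct_channel(Collector) ->
    {ok, {User, _ServerProperties}} =
        rabbit_direct:connect(<<"guest">>, <<"guest">>, <<"/">>),
    {ok, ChPid} =
        rabbit_direct:start_channel(1, self(), User, <<"/">>, Collector),
    ChPid.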
+ +%%---------------------------------------------------------------------------- + +connect(Username, Password, VHost) -> + case lists:keymember(rabbit, 1, application:which_applications()) of + true -> + try rabbit_access_control:user_pass_login(Username, Password) of + #user{} = User -> + try rabbit_access_control:check_vhost_access(User, VHost) of + ok -> {ok, {User, rabbit_reader:server_properties()}} + catch + exit:#amqp_error{name = access_refused} -> + {error, access_refused} + end + catch + exit:#amqp_error{name = access_refused} -> {error, auth_failure} + end; + false -> + {error, broker_not_found_on_node} + end. + +start_channel(Number, ClientChannelPid, User, VHost, Collector) -> + {ok, _, {ChannelPid, _}} = + supervisor2:start_child( + rabbit_direct_client_sup, + [{direct, Number, ClientChannelPid, User, VHost, Collector}]), + {ok, ChannelPid}. diff -Nru rabbitmq-server-1.7.2/src/rabbit.erl rabbitmq-server-2.3.1/src/rabbit.erl --- rabbitmq-server-1.7.2/src/rabbit.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,39 +1,25 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit). -behaviour(application). --export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0, rotate_logs/1]). +-export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0, + rotate_logs/1]). -export([start/2, stop/1]). @@ -47,24 +33,51 @@ [{description, "codec correctness check"}, {mfa, {rabbit_binary_generator, check_empty_content_body_frame_size, - []}}]}). + []}}, + {enables, external_infrastructure}]}). 
-rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, + {enables, external_infrastructure}]}). + +-rabbit_boot_step({file_handle_cache, + [{description, "file handle cache server"}, + {mfa, {rabbit_sup, start_restartable_child, + [file_handle_cache]}}, + {enables, worker_pool}]}). + +-rabbit_boot_step({worker_pool, + [{description, "worker pool"}, + {mfa, {rabbit_sup, start_child, [worker_pool_sup]}}, + {enables, external_infrastructure}]}). + +-rabbit_boot_step({external_infrastructure, + [{description, "external infrastructure ready"}]}). + +-rabbit_boot_step({rabbit_registry, + [{description, "plugin registry"}, + {mfa, {rabbit_sup, start_child, + [rabbit_registry]}}, + {requires, external_infrastructure}, {enables, kernel_ready}]}). -rabbit_boot_step({rabbit_log, [{description, "logging server"}, - {mfa, {rabbit_sup, start_child, [rabbit_log]}}, + {mfa, {rabbit_sup, start_restartable_child, + [rabbit_log]}}, + {requires, external_infrastructure}, {enables, kernel_ready}]}). --rabbit_boot_step({rabbit_hooks, - [{description, "internal event notification system"}, - {mfa, {rabbit_hooks, start, []}}, +-rabbit_boot_step({rabbit_event, + [{description, "statistics event manager"}, + {mfa, {rabbit_sup, start_restartable_child, + [rabbit_event]}}, + {requires, external_infrastructure}, {enables, kernel_ready}]}). -rabbit_boot_step({kernel_ready, - [{description, "kernel ready"}]}). + [{description, "kernel ready"}, + {requires, external_infrastructure}]}). -rabbit_boot_step({rabbit_alarm, [{description, "alarm handler"}, @@ -72,77 +85,76 @@ {requires, kernel_ready}, {enables, core_initialized}]}). --rabbit_boot_step({rabbit_amqqueue_sup, - [{description, "queue supervisor"}, - {mfa, {rabbit_amqqueue, start, []}}, +-rabbit_boot_step({rabbit_memory_monitor, + [{description, "memory monitor"}, + {mfa, {rabbit_sup, start_restartable_child, + [rabbit_memory_monitor]}}, + {requires, rabbit_alarm}, + {enables, core_initialized}]}). + +-rabbit_boot_step({guid_generator, + [{description, "guid generator"}, + {mfa, {rabbit_sup, start_restartable_child, + [rabbit_guid]}}, {requires, kernel_ready}, {enables, core_initialized}]}). --rabbit_boot_step({rabbit_router, - [{description, "cluster router"}, - {mfa, {rabbit_sup, start_child, [rabbit_router]}}, +-rabbit_boot_step({delegate_sup, + [{description, "cluster delegate"}, + {mfa, {rabbit_sup, start_child, + [delegate_sup]}}, {requires, kernel_ready}, {enables, core_initialized}]}). -rabbit_boot_step({rabbit_node_monitor, [{description, "node monitor"}, - {mfa, {rabbit_sup, start_child, [rabbit_node_monitor]}}, + {mfa, {rabbit_sup, start_restartable_child, + [rabbit_node_monitor]}}, {requires, kernel_ready}, - {requires, rabbit_amqqueue_sup}, {enables, core_initialized}]}). -rabbit_boot_step({core_initialized, - [{description, "core initialized"}]}). + [{description, "core initialized"}, + {requires, kernel_ready}]}). -rabbit_boot_step({empty_db_check, [{description, "empty DB check"}, {mfa, {?MODULE, maybe_insert_default_data, []}}, - {requires, core_initialized}]}). + {requires, core_initialized}, + {enables, routing_ready}]}). -rabbit_boot_step({exchange_recovery, [{description, "exchange recovery"}, {mfa, {rabbit_exchange, recover, []}}, - {requires, empty_db_check}]}). - --rabbit_boot_step({queue_recovery, - [{description, "queue recovery"}, - {mfa, {rabbit_amqqueue, recover, []}}, - {requires, exchange_recovery}]}). - --rabbit_boot_step({persister, - [{mfa, {rabbit_sup, start_child, [rabbit_persister]}}, - {requires, queue_recovery}]}). 
+ {requires, empty_db_check}, + {enables, routing_ready}]}). --rabbit_boot_step({guid_generator, - [{description, "guid generator"}, - {mfa, {rabbit_sup, start_child, [rabbit_guid]}}, - {requires, persister}, +-rabbit_boot_step({queue_sup_queue_recovery, + [{description, "queue supervisor and queue recovery"}, + {mfa, {rabbit_amqqueue, start, []}}, + {requires, empty_db_check}, {enables, routing_ready}]}). -rabbit_boot_step({routing_ready, - [{description, "message delivery logic ready"}]}). + [{description, "message delivery logic ready"}, + {requires, core_initialized}]}). -rabbit_boot_step({log_relay, [{description, "error log relay"}, {mfa, {rabbit_error_logger, boot, []}}, - {requires, routing_ready}]}). + {requires, routing_ready}, + {enables, networking}]}). + +-rabbit_boot_step({direct_client, + [{mfa, {rabbit_direct, boot, []}}, + {requires, log_relay}]}). -rabbit_boot_step({networking, [{mfa, {rabbit_networking, boot, []}}, - {requires, log_relay}, - {enables, networking_listening}]}). - --rabbit_boot_step({networking_listening, - [{description, "network listeners available"}]}). + {requires, log_relay}]}). %%--------------------------------------------------------------------------- --import(application). --import(mnesia). --import(lists). --import(inet). --import(gen_tcp). - -include("rabbit_framing.hrl"). -include("rabbit.hrl"). @@ -152,18 +164,19 @@ -ifdef(use_specs). --type(log_location() :: 'tty' | 'undefined' | string()). -type(file_suffix() :: binary()). +%% this really should be an abstract type +-type(log_location() :: 'tty' | 'undefined' | file:filename()). -spec(prepare/0 :: () -> 'ok'). -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). -spec(stop_and_halt/0 :: () -> 'ok'). --spec(rotate_logs/1 :: (file_suffix()) -> 'ok' | {'error', any()}). --spec(status/0 :: () -> - [{running_applications, [{atom(), string(), string()}]} | - {nodes, [erlang_node()]} | - {running_nodes, [erlang_node()]}]). +-spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())). +-spec(status/0 :: + () -> [{running_applications, [{atom(), string(), string()}]} | + {nodes, [{rabbit_mnesia:node_type(), [node()]}]} | + {running_nodes, [node()]}]). -spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()). -endif. @@ -171,8 +184,7 @@ %%---------------------------------------------------------------------------- prepare() -> - ok = ensure_working_log_handlers(), - ok = rabbit_mnesia:ensure_mnesia_dir(). + ok = ensure_working_log_handlers(). start() -> try @@ -187,15 +199,12 @@ ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> - spawn(fun () -> - SleepTime = 1000, - rabbit_log:info("Stop-and-halt request received; " - "halting in ~p milliseconds~n", - [SleepTime]), - timer:sleep(SleepTime), - init:stop() - end), - case catch stop() of _ -> ok end. + try + stop() + after + init:stop() + end, + ok. status() -> [{running_applications, application:which_applications()}] ++ @@ -213,14 +222,18 @@ %%-------------------------------------------------------------------- start(normal, []) -> - {ok, SupPid} = rabbit_sup:start_link(), - - print_banner(), - [ok = run_boot_step(Step) || Step <- boot_steps()], - io:format("~nbroker running~n"), - - {ok, SupPid}. - + case erts_version_check() of + ok -> + {ok, SupPid} = rabbit_sup:start_link(), + + print_banner(), + [ok = run_boot_step(Step) || Step <- boot_steps()], + io:format("~nbroker running~n"), + + {ok, SupPid}; + Error -> + Error + end. 
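The boot sequence is now expressed entirely through these -rabbit_boot_step attributes, which are collected from all loaded modules and ordered by their requires/enables edges. A hedged example of what a declaration from a hypothetical extra module could look like (my_plugin_worker is made up):

-rabbit_boot_step({my_plugin_worker,
                   [{description, "my plugin worker"},
                    {mfa,         {rabbit_sup, start_restartable_child,
                                   [my_plugin_worker]}},
                    {requires,    rabbit_registry},
                    {enables,     kernel_ready}]}).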
stop(_State) -> terminated_ok = error_logger:delete_report_handler(rabbit_error_logger), @@ -233,6 +246,14 @@ %%--------------------------------------------------------------------------- +erts_version_check() -> + FoundVer = erlang:system_info(version), + case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of + true -> ok; + false -> {error, {erlang_version_too_old, + {found, FoundVer}, {required, ?ERTS_MINIMUM}}} + end. + boot_error(Format, Args) -> io:format("BOOT ERROR: " ++ Format, Args), error_logger:error_msg(Format, Args), @@ -249,84 +270,67 @@ io:format("-- ~s~n", [Description]); MFAs -> io:format("starting ~-60s ...", [Description]), - [case catch apply(M,F,A) of - {'EXIT', Reason} -> - boot_error("FAILED~nReason: ~p~n", [Reason]); - ok -> - ok + [try + apply(M,F,A) + catch + _:Reason -> boot_error("FAILED~nReason: ~p~nStacktrace: ~p~n", + [Reason, erlang:get_stacktrace()]) end || {M,F,A} <- MFAs], io:format("done~n"), ok end. boot_steps() -> - AllApps = [App || {App, _, _} <- application:loaded_applications()], - Modules = lists:usort( - lists:append([Modules - || {ok, Modules} <- - [application:get_key(App, modules) - || App <- AllApps]])), - UnsortedSteps = - lists:flatmap(fun (Module) -> - [{StepName, Attributes} - || {rabbit_boot_step, [{StepName, Attributes}]} - <- Module:module_info(attributes)] - end, Modules), - sort_boot_steps(UnsortedSteps). + sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)). -sort_boot_steps(UnsortedSteps) -> - G = digraph:new([acyclic]), +vertices(_Module, Steps) -> + [{StepName, {StepName, Atts}} || {StepName, Atts} <- Steps]. + +edges(_Module, Steps) -> + [case Key of + requires -> {StepName, OtherStep}; + enables -> {OtherStep, StepName} + end || {StepName, Atts} <- Steps, + {Key, OtherStep} <- Atts, + Key =:= requires orelse Key =:= enables]. - %% Add vertices, with duplicate checking. - [case digraph:vertex(G, StepName) of - false -> digraph:add_vertex(G, StepName, Step); - _ -> boot_error("Duplicate boot step name: ~w~n", [StepName]) - end || Step = {StepName, _Attrs} <- UnsortedSteps], - - %% Add edges, detecting cycles and missing vertices. - lists:foreach(fun ({StepName, Attributes}) -> - [add_boot_step_dep(G, StepName, PrecedingStepName) - || {requires, PrecedingStepName} <- Attributes], - [add_boot_step_dep(G, SucceedingStepName, StepName) - || {enables, SucceedingStepName} <- Attributes] - end, UnsortedSteps), - - %% Use topological sort to find a consistent ordering (if there is - %% one, otherwise fail). - SortedStepsRev = [begin - {StepName, Step} = digraph:vertex(G, StepName), - Step - end || StepName <- digraph_utils:topsort(G)], - SortedSteps = lists:reverse(SortedStepsRev), - - digraph:delete(G), - - %% Check that all mentioned {M,F,A} triples are exported. - case [{StepName, {M,F,A}} - || {StepName, Attributes} <- SortedSteps, - {mfa, {M,F,A}} <- Attributes, - not erlang:function_exported(M, F, length(A))] of - [] -> SortedSteps; - MissingFunctions -> boot_error("Boot step functions not exported: ~p~n", - [MissingFunctions]) - end. - -add_boot_step_dep(G, RunsSecond, RunsFirst) -> - case digraph:add_edge(G, RunsSecond, RunsFirst) of - {error, Reason} -> - boot_error("Could not add boot step dependency of ~w on ~w:~n~s", - [RunsSecond, RunsFirst, +sort_boot_steps(UnsortedSteps) -> + case rabbit_misc:build_acyclic_graph(fun vertices/2, fun edges/2, + UnsortedSteps) of + {ok, G} -> + %% Use topological sort to find a consistent ordering (if + %% there is one, otherwise fail). 
+ SortedSteps = lists:reverse( + [begin + {StepName, Step} = digraph:vertex(G, StepName), + Step + end || StepName <- digraph_utils:topsort(G)]), + digraph:delete(G), + %% Check that all mentioned {M,F,A} triples are exported. + case [{StepName, {M,F,A}} || + {StepName, Attributes} <- SortedSteps, + {mfa, {M,F,A}} <- Attributes, + not erlang:function_exported(M, F, length(A))] of + [] -> SortedSteps; + MissingFunctions -> boot_error( + "Boot step functions not exported: ~p~n", + [MissingFunctions]) + end; + {error, {vertex, duplicate, StepName}} -> + boot_error("Duplicate boot step name: ~w~n", [StepName]); + {error, {edge, Reason, From, To}} -> + boot_error( + "Could not add boot step dependency of ~w on ~w:~n~s", + [To, From, case Reason of {bad_vertex, V} -> io_lib:format("Boot step not registered: ~w~n", [V]); {bad_edge, [First | Rest]} -> [io_lib:format("Cyclic dependency: ~w", [First]), - [io_lib:format(" depends on ~w", [Next]) - || Next <- Rest], + [io_lib:format(" depends on ~w", [Next]) || + Next <- Rest], io_lib:format(" depends on ~w~n", [First])] - end]); - _ -> - ok + end]) end. %%--------------------------------------------------------------------------- @@ -354,6 +358,14 @@ Other -> Other end. +config_files() -> + case init:get_argument(config) of + {ok, Files} -> [filename:absname( + filename:rootname(File, ".config") ++ ".config") || + File <- Files]; + error -> [] + end. + %--------------------------------------------------------------------------- print_banner() -> @@ -372,20 +384,31 @@ "| ~s +---+ |~n" "| |~n" "+-------------------+~n" - "AMQP ~p-~p~n~s~n~s~n~n", + "~s~n~s~n~s~n~n", [Product, string:right([$v|Version], ProductLen), - ?PROTOCOL_VERSION_MAJOR, ?PROTOCOL_VERSION_MINOR, + ?PROTOCOL_VERSION, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]), Settings = [{"node", node()}, {"app descriptor", app_location()}, {"home dir", home_dir()}, + {"config file(s)", config_files()}, {"cookie hash", rabbit_misc:cookie_hash()}, {"log", log_location(kernel)}, {"sasl log", log_location(sasl)}, - {"database dir", rabbit_mnesia:dir()}], - DescrLen = lists:max([length(K) || {K, _V} <- Settings]), - Format = "~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", - lists:foreach(fun ({K, V}) -> io:format(Format, [K, V]) end, Settings), + {"database dir", rabbit_mnesia:dir()}, + {"erlang version", erlang:system_info(version)}], + DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]), + Format = fun (K, V) -> + io:format("~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", + [K, V]) + end, + lists:foreach(fun ({"config file(s)" = K, []}) -> + Format(K, "(none)"); + ({"config file(s)" = K, [V0 | Vs]}) -> + Format(K, V0), [Format("", V) || V <- Vs]; + ({K, V}) -> + Format(K, V) + end, Settings), io:nl(). 
ensure_working_log_handlers() -> @@ -434,15 +457,20 @@ insert_default_data() -> {ok, DefaultUser} = application:get_env(default_user), {ok, DefaultPass} = application:get_env(default_pass), + {ok, DefaultAdmin} = application:get_env(default_user_is_admin), {ok, DefaultVHost} = application:get_env(default_vhost), {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} = application:get_env(default_permissions), - ok = rabbit_access_control:add_vhost(DefaultVHost), - ok = rabbit_access_control:add_user(DefaultUser, DefaultPass), - ok = rabbit_access_control:set_permissions(DefaultUser, DefaultVHost, - DefaultConfigurePerm, - DefaultWritePerm, - DefaultReadPerm), + ok = rabbit_vhost:add(DefaultVHost), + ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass), + case DefaultAdmin of + true -> rabbit_auth_backend_internal:set_admin(DefaultUser); + _ -> ok + end, + ok = rabbit_auth_backend_internal:set_permissions(DefaultUser, DefaultVHost, + DefaultConfigurePerm, + DefaultWritePerm, + DefaultReadPerm), ok. rotate_logs(File, Suffix, Handler) -> diff -Nru rabbitmq-server-1.7.2/src/rabbit_error_logger.erl rabbitmq-server-2.3.1/src/rabbit_error_logger.erl --- rabbitmq-server-1.7.2/src/rabbit_error_logger.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_error_logger.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_error_logger). @@ -39,7 +24,8 @@ -export([boot/0]). --export([init/1, terminate/2, code_change/3, handle_call/2, handle_event/2, handle_info/2]). 
+-export([init/1, terminate/2, code_change/3, handle_call/2, handle_event/2, + handle_info/2]). boot() -> {ok, DefaultVHost} = application:get_env(default_vhost), @@ -48,7 +34,7 @@ init([DefaultVHost]) -> #exchange{} = rabbit_exchange:declare( rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME), - topic, true, false, []), + topic, true, false, false, []), {ok, #resource{virtual_host = DefaultVHost, kind = exchange, name = ?LOG_EXCH_NAME}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_error_logger_file_h.erl rabbitmq-server-2.3.1/src/rabbit_error_logger_file_h.erl --- rabbitmq-server-1.7.2/src/rabbit_error_logger_file_h.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_error_logger_file_h.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,39 +1,25 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_error_logger_file_h). -behaviour(gen_event). --export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). +-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, + code_change/3]). 
%% rabbit_error_logger_file_h is a wrapper around the error_logger_file_h %% module because the original's init/1 does not match properly diff -Nru rabbitmq-server-1.7.2/src/rabbit_event.erl rabbitmq-server-2.3.1/src/rabbit_event.erl --- rabbitmq-server-1.7.2/src/rabbit_event.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_event.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,144 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_event). + +-include("rabbit.hrl"). + +-export([start_link/0]). +-export([init_stats_timer/0, ensure_stats_timer/2, stop_stats_timer/1]). +-export([reset_stats_timer/1]). +-export([stats_level/1, if_enabled/2]). +-export([notify/2, notify_if/3]). + +%%---------------------------------------------------------------------------- + +-record(state, {level, timer}). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([event_type/0, event_props/0, event_timestamp/0, event/0]). + +-type(event_type() :: atom()). +-type(event_props() :: term()). +-type(event_timestamp() :: + {non_neg_integer(), non_neg_integer(), non_neg_integer()}). + +-type(event() :: #event { + type :: event_type(), + props :: event_props(), + timestamp :: event_timestamp() + }). + +-type(level() :: 'none' | 'coarse' | 'fine'). + +-opaque(state() :: #state { + level :: level(), + timer :: atom() + }). + +-type(timer_fun() :: fun (() -> 'ok')). + +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). +-spec(init_stats_timer/0 :: () -> state()). +-spec(ensure_stats_timer/2 :: (state(), timer_fun()) -> state()). +-spec(stop_stats_timer/1 :: (state()) -> state()). +-spec(reset_stats_timer/1 :: (state()) -> state()). +-spec(stats_level/1 :: (state()) -> level()). +-spec(if_enabled/2 :: (state(), timer_fun()) -> 'ok'). +-spec(notify/2 :: (event_type(), event_props()) -> 'ok'). +-spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link() -> + gen_event:start_link({local, ?MODULE}). + +%% The idea is, for each stat-emitting object: +%% +%% On startup: +%% Timer = init_stats_timer() +%% notify(created event) +%% if_enabled(internal_emit_stats) - so we immediately send something +%% +%% On wakeup: +%% ensure_stats_timer(Timer, emit_stats) +%% (Note we can't emit stats immediately, the timer may have fired 1ms ago.) +%% +%% emit_stats: +%% if_enabled(internal_emit_stats) +%% reset_stats_timer(Timer) - just bookkeeping +%% +%% Pre-hibernation: +%% if_enabled(internal_emit_stats) +%% stop_stats_timer(Timer) +%% +%% internal_emit_stats: +%% notify(stats) + +init_stats_timer() -> + {ok, StatsLevel} = application:get_env(rabbit, collect_statistics), + #state{level = StatsLevel, timer = undefined}. 
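The comment block above spells out the lifecycle every stat-emitting process is expected to follow. A minimal sketch of such a process, assuming a hypothetical gen_server; the module name, the #st{} record and the emit_stats message are invented for illustration and are not part of this patch:

    -module(my_stat_emitter).  %% hypothetical
    -behaviour(gen_server).

    -export([start_link/0]).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).

    -record(st, {stats_timer}).

    start_link() -> gen_server:start_link(?MODULE, [], []).

    init([]) ->
        StatsTimer = rabbit_event:init_stats_timer(),
        rabbit_event:notify(my_thing_created, [{pid, self()}]),
        %% emit something immediately, if stats collection is enabled at all
        rabbit_event:if_enabled(StatsTimer, fun () -> internal_emit_stats() end),
        {ok, #st{stats_timer = StatsTimer}}.

    handle_call(_Msg, _From, State) -> {reply, ok, State}.

    %% on any wakeup, make sure an emission is scheduled (at most one per
    %% ?STATS_INTERVAL)
    handle_cast(_Msg, State = #st{stats_timer = ST}) ->
        Self = self(),
        ST1 = rabbit_event:ensure_stats_timer(ST, fun () -> Self ! emit_stats end),
        {noreply, State#st{stats_timer = ST1}}.

    handle_info(emit_stats, State = #st{stats_timer = ST}) ->
        rabbit_event:if_enabled(ST, fun () -> internal_emit_stats() end),
        {noreply, State#st{stats_timer = rabbit_event:reset_stats_timer(ST)}};
    handle_info(_Info, State) ->
        {noreply, State}.

    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

    internal_emit_stats() ->
        rabbit_event:notify(my_thing_stats, [{pid, self()}]).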
+ +ensure_stats_timer(State = #state{level = none}, _Fun) -> + State; +ensure_stats_timer(State = #state{timer = undefined}, Fun) -> + {ok, TRef} = timer:apply_after(?STATS_INTERVAL, + erlang, apply, [Fun, []]), + State#state{timer = TRef}; +ensure_stats_timer(State, _Fun) -> + State. + +stop_stats_timer(State = #state{level = none}) -> + State; +stop_stats_timer(State = #state{timer = undefined}) -> + State; +stop_stats_timer(State = #state{timer = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#state{timer = undefined}. + +reset_stats_timer(State) -> + State#state{timer = undefined}. + +stats_level(#state{level = Level}) -> + Level. + +if_enabled(#state{level = none}, _Fun) -> + ok; +if_enabled(_State, Fun) -> + Fun(), + ok. + +notify_if(true, Type, Props) -> notify(Type, Props); +notify_if(false, _Type, _Props) -> ok. + +notify(Type, Props) -> + try + %% TODO: switch to os:timestamp() when we drop support for + %% Erlang/OTP < R13B01 + gen_event:notify(rabbit_event, #event{type = Type, + props = Props, + timestamp = now()}) + catch error:badarg -> + %% badarg means rabbit_event is no longer registered. We never + %% unregister it so the great likelihood is that we're shutting + %% down the broker but some events were backed up. Ignore it. + ok + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange.erl rabbitmq-server-2.3.1/src/rabbit_exchange.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,163 +1,188 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. 
%% -module(rabbit_exchange). --include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([recover/0, declare/5, lookup/1, lookup_or_die/1, - list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, - publish/2]). --export([add_binding/4, delete_binding/4, list_bindings/1]). --export([delete/2]). --export([delete_queue_bindings/1, delete_transient_queue_bindings/1]). --export([check_type/1, assert_type/2, topic_matches/2, headers_match/2]). - -%% EXTENDED API --export([list_exchange_bindings/1]). --export([list_queue_bindings/1]). - --import(mnesia). --import(sets). --import(lists). --import(qlc). --import(regexp). +-export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, + info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). +-export([callback/3]). +%% this must be run inside a mnesia tx +-export([maybe_auto_delete/1]). +-export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --type(bind_res() :: 'ok' | {'error', - 'queue_not_found' | - 'exchange_not_found' | - 'exchange_and_queue_not_found'}). +-export_type([name/0, type/0]). + +-type(name() :: rabbit_types:r('exchange')). +-type(type() :: atom()). + -spec(recover/0 :: () -> 'ok'). --spec(declare/5 :: (exchange_name(), exchange_type(), boolean(), boolean(), - amqp_table()) -> exchange()). --spec(check_type/1 :: (binary()) -> atom()). --spec(assert_type/2 :: (exchange(), atom()) -> 'ok'). --spec(lookup/1 :: (exchange_name()) -> {'ok', exchange()} | not_found()). --spec(lookup_or_die/1 :: (exchange_name()) -> exchange()). --spec(list/1 :: (vhost()) -> [exchange()]). --spec(info_keys/0 :: () -> [info_key()]). --spec(info/1 :: (exchange()) -> [info()]). --spec(info/2 :: (exchange(), [info_key()]) -> [info()]). --spec(info_all/1 :: (vhost()) -> [[info()]]). --spec(info_all/2 :: (vhost(), [info_key()]) -> [[info()]]). --spec(publish/2 :: (exchange(), delivery()) -> {routing_result(), [pid()]}). --spec(add_binding/4 :: - (exchange_name(), queue_name(), routing_key(), amqp_table()) -> - bind_res() | {'error', 'durability_settings_incompatible'}). --spec(delete_binding/4 :: - (exchange_name(), queue_name(), routing_key(), amqp_table()) -> - bind_res() | {'error', 'binding_not_found'}). --spec(list_bindings/1 :: (vhost()) -> - [{exchange_name(), queue_name(), routing_key(), amqp_table()}]). --spec(delete_queue_bindings/1 :: (queue_name()) -> 'ok'). --spec(delete_transient_queue_bindings/1 :: (queue_name()) -> 'ok'). --spec(topic_matches/2 :: (binary(), binary()) -> boolean()). --spec(headers_match/2 :: (amqp_table(), amqp_table()) -> boolean()). --spec(delete/2 :: (exchange_name(), boolean()) -> - 'ok' | not_found() | {'error', 'in_use'}). --spec(list_queue_bindings/1 :: (queue_name()) -> - [{exchange_name(), routing_key(), amqp_table()}]). --spec(list_exchange_bindings/1 :: (exchange_name()) -> - [{queue_name(), routing_key(), amqp_table()}]). +-spec(declare/6 :: + (name(), type(), boolean(), boolean(), boolean(), + rabbit_framing:amqp_table()) + -> rabbit_types:exchange()). +-spec(check_type/1 :: + (binary()) -> atom() | rabbit_types:connection_exit()). +-spec(assert_equivalence/6 :: + (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(), + rabbit_framing:amqp_table()) + -> 'ok' | rabbit_types:connection_exit()). 
+-spec(assert_args_equivalence/2 :: + (rabbit_types:exchange(), rabbit_framing:amqp_table()) + -> 'ok' | rabbit_types:connection_exit()). +-spec(lookup/1 :: + (name()) -> rabbit_types:ok(rabbit_types:exchange()) | + rabbit_types:error('not_found')). +-spec(lookup_or_die/1 :: + (name()) -> rabbit_types:exchange() | + rabbit_types:channel_exit()). +-spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]). +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()). +-spec(info/2 :: + (rabbit_types:exchange(), rabbit_types:info_keys()) + -> rabbit_types:infos()). +-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). +-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) + -> [rabbit_types:infos()]). +-spec(publish/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) + -> {rabbit_router:routing_result(), [pid()]}). +-spec(delete/2 :: + (name(), boolean())-> 'ok' | + rabbit_types:error('not_found') | + rabbit_types:error('in_use')). +-spec(maybe_auto_delete/1:: + (rabbit_types:exchange()) + -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). +-spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). -endif. %%---------------------------------------------------------------------------- --define(INFO_KEYS, [name, type, durable, auto_delete, arguments]. +-define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - ok = rabbit_misc:table_foreach( - fun(Exchange) -> ok = mnesia:write(rabbit_exchange, - Exchange, write) - end, rabbit_durable_exchange), - ok = rabbit_misc:table_foreach( - fun(Route) -> {_, ReverseRoute} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, - Route, write), - ok = mnesia:write(rabbit_reverse_route, - ReverseRoute, write) - end, rabbit_durable_route). - -declare(ExchangeName, Type, Durable, AutoDelete, Args) -> - Exchange = #exchange{name = ExchangeName, - type = Type, - durable = Durable, - auto_delete = AutoDelete, - arguments = Args}, + Xs = rabbit_misc:table_fold( + fun (X, Acc) -> + ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc] + end, [], rabbit_durable_exchange), + Bs = rabbit_binding:recover(), + recover_with_bindings( + lists:keysort(#binding.source, Bs), + lists:keysort(#exchange.name, Xs), []). + +recover_with_bindings([B = #binding{source = XName} | Rest], + Xs = [#exchange{name = XName} | _], + Bindings) -> + recover_with_bindings(Rest, Xs, [B | Bindings]); +recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> + (type_to_module(Type)):recover(X, Bindings), + recover_with_bindings(Bs, Xs, []); +recover_with_bindings([], [], []) -> + ok. 
+ +declare(XName, Type, Durable, AutoDelete, Internal, Args) -> + X = #exchange{name = XName, + type = Type, + durable = Durable, + auto_delete = AutoDelete, + internal = Internal, + arguments = Args}, + %% We want to upset things if it isn't ok + ok = (type_to_module(Type)):validate(X), rabbit_misc:execute_mnesia_transaction( fun () -> - case mnesia:wread({rabbit_exchange, ExchangeName}) of - [] -> ok = mnesia:write(rabbit_exchange, Exchange, write), - if Durable -> - ok = mnesia:write(rabbit_durable_exchange, - Exchange, write); - true -> ok - end, - Exchange; - [ExistingX] -> ExistingX + case mnesia:wread({rabbit_exchange, XName}) of + [] -> + ok = mnesia:write(rabbit_exchange, X, write), + ok = case Durable of + true -> mnesia:write(rabbit_durable_exchange, + X, write); + false -> ok + end, + {new, X}; + [ExistingX] -> + {existing, ExistingX} end + end, + fun ({new, Exchange}, Tx) -> + callback(Exchange, create, [Tx, Exchange]), + rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), + Exchange; + ({existing, Exchange}, _Tx) -> + Exchange; + (Err, _Tx) -> + Err end). -check_type(<<"fanout">>) -> - fanout; -check_type(<<"direct">>) -> - direct; -check_type(<<"topic">>) -> - topic; -check_type(<<"headers">>) -> - headers; -check_type(T) -> - rabbit_misc:protocol_error( - command_invalid, "invalid exchange type '~s'", [T]). +%% Used with atoms from records; e.g., the type is expected to exist. +type_to_module(T) -> + {ok, Module} = rabbit_registry:lookup_module(exchange, T), + Module. + +%% Used with binaries sent over the wire; the type may not exist. +check_type(TypeBin) -> + case rabbit_registry:binary_to_type(TypeBin) of + {error, not_found} -> + rabbit_misc:protocol_error( + command_invalid, "unknown exchange type '~s'", [TypeBin]); + T -> + case rabbit_registry:lookup_module(exchange, T) of + {error, not_found} -> rabbit_misc:protocol_error( + command_invalid, + "invalid exchange type '~s'", [T]); + {ok, _Module} -> T + end + end. -assert_type(#exchange{ type = ActualType }, RequiredType) - when ActualType == RequiredType -> - ok; -assert_type(#exchange{ name = Name, type = ActualType }, RequiredType) -> +assert_equivalence(X = #exchange{ durable = Durable, + auto_delete = AutoDelete, + internal = Internal, + type = Type}, + Type, Durable, AutoDelete, Internal, RequiredArgs) -> + (type_to_module(Type)):assert_args_equivalence(X, RequiredArgs); +assert_equivalence(#exchange{ name = Name }, + _Type, _Durable, _Internal, _AutoDelete, _Args) -> rabbit_misc:protocol_error( - not_allowed, "cannot redeclare ~s of type '~s' with type '~s'", - [rabbit_misc:rs(Name), ActualType, RequiredType]). + precondition_failed, + "cannot redeclare ~s with different type, durable, " + "internal or autodelete value", + [rabbit_misc:rs(Name)]). + +assert_args_equivalence(#exchange{ name = Name, arguments = Args }, + RequiredArgs) -> + %% The spec says "Arguments are compared for semantic + %% equivalence". The only arg we care about is + %% "alternate-exchange". + rabbit_misc:assert_args_equivalence(Args, RequiredArgs, Name, + [<<"alternate-exchange">>]). lookup(Name) -> rabbit_misc:dirty_read({rabbit_exchange, Name}). lookup_or_die(Name) -> case lookup(Name) of - {ok, X} -> X; + {ok, X} -> X; {error, not_found} -> rabbit_misc:not_found(Name) end. 
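Compared to 1.7.2, declare now takes a sixth argument (the internal flag), and the transaction distinguishes new from pre-existing exchanges so that the type module's create/2 callback runs with a flag saying whether we are still inside the transaction, and the exchange_created event is emitted only for genuinely new exchanges. A minimal caller sketch, to be run inside a broker node; the vhost and exchange name are illustrative:

    %% declare a durable, non-auto-delete, non-internal topic exchange
    XName = rabbit_misc:r(<<"/">>, exchange, <<"my.topic">>),
    X = rabbit_exchange:declare(XName, topic, true, false, false, []),
    {ok, X} = rabbit_exchange:lookup(XName).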
@@ -179,6 +204,7 @@ i(type, #exchange{type = Type}) -> Type; i(durable, #exchange{durable = Durable}) -> Durable; i(auto_delete, #exchange{auto_delete = AutoDelete}) -> AutoDelete; +i(internal, #exchange{internal = Internal}) -> Internal; i(arguments, #exchange{arguments = Arguments}) -> Arguments; i(Item, _) -> throw({bad_argument, Item}). @@ -190,425 +216,95 @@ info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end). -publish(X, Delivery) -> - publish(X, [], Delivery). - -publish(X, Seen, Delivery = #delivery{ - message = #basic_message{routing_key = RK, content = C}}) -> - case rabbit_router:deliver(route(X, RK, C), Delivery) of - {_, []} = R -> - #exchange{name = XName, arguments = Args} = X, - case rabbit_misc:r_arg(XName, exchange, Args, - <<"alternate-exchange">>) of - undefined -> - R; - AName -> - NewSeen = [XName | Seen], - case lists:member(AName, NewSeen) of - true -> - R; - false -> - case lookup(AName) of - {ok, AX} -> - publish(AX, NewSeen, Delivery); - {error, not_found} -> - rabbit_log:warning( - "alternate exchange for ~s " - "does not exist: ~s", - [rabbit_misc:rs(XName), - rabbit_misc:rs(AName)]), - R - end - end - end; - R -> - R - end. - -%% return the list of qpids to which a message with a given routing -%% key, sent to a particular exchange, should be delivered. -%% -%% The function ensures that a qpid appears in the return list exactly -%% as many times as a message should be delivered to it. With the -%% current exchange types that is at most once. -route(X = #exchange{type = topic}, RoutingKey, _Content) -> - match_bindings(X, fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RoutingKey) - end); - -route(X = #exchange{type = headers}, _RoutingKey, Content) -> - Headers = case (Content#content.properties)#'P_basic'.headers of - undefined -> []; - H -> sort_arguments(H) - end, - match_bindings(X, fun (#binding{args = Spec}) -> - headers_match(Spec, Headers) - end); - -route(X = #exchange{type = fanout}, _RoutingKey, _Content) -> - match_routing_key(X, '_'); - -route(X = #exchange{type = direct}, RoutingKey, _Content) -> - match_routing_key(X, RoutingKey). - -sort_arguments(Arguments) -> - lists:keysort(1, Arguments). - -%% TODO: Maybe this should be handled by a cursor instead. -%% TODO: This causes a full scan for each entry with the same exchange -match_bindings(#exchange{name = Name}, Match) -> - Query = qlc:q([QName || #route{binding = Binding = #binding{ - exchange_name = ExchangeName, - queue_name = QName}} <- - mnesia:table(rabbit_route), - ExchangeName == Name, - Match(Binding)]), - lookup_qpids( - try - mnesia:async_dirty(fun qlc:e/1, [Query]) - catch exit:{aborted, {badarg, _}} -> - %% work around OTP-7025, which was fixed in R12B-1, by - %% falling back on a less efficient method - [QName || #route{binding = Binding = #binding{ - queue_name = QName}} <- - mnesia:dirty_match_object( - rabbit_route, - #route{binding = #binding{exchange_name = Name, - _ = '_'}}), - Match(Binding)] - end). - -match_routing_key(#exchange{name = Name}, RoutingKey) -> - MatchHead = #route{binding = #binding{exchange_name = Name, - queue_name = '$1', - key = RoutingKey, - _ = '_'}}, - lookup_qpids(mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}])). - -lookup_qpids(Queues) -> - sets:fold( - fun(Key, Acc) -> - case mnesia:dirty_read({rabbit_queue, Key}) of - [#amqqueue{pid = QPid}] -> [QPid | Acc]; - [] -> Acc - end - end, [], sets:from_list(Queues)). 
- -%% TODO: Should all of the route and binding management not be -%% refactored to its own module, especially seeing as unbind will have -%% to be implemented for 0.91 ? - -delete_exchange_bindings(ExchangeName) -> - [begin - ok = mnesia:delete_object(rabbit_reverse_route, - reverse_route(Route), write), - ok = delete_forward_routes(Route) - end || Route <- mnesia:match_object( - rabbit_route, - #route{binding = #binding{exchange_name = ExchangeName, - _ = '_'}}, - write)], - ok. - -delete_queue_bindings(QueueName) -> - delete_queue_bindings(QueueName, fun delete_forward_routes/1). - -delete_transient_queue_bindings(QueueName) -> - delete_queue_bindings(QueueName, fun delete_transient_forward_routes/1). - -delete_queue_bindings(QueueName, FwdDeleteFun) -> - Exchanges = exchanges_for_queue(QueueName), - [begin - ok = FwdDeleteFun(reverse_route(Route)), - ok = mnesia:delete_object(rabbit_reverse_route, Route, write) - end || Route <- mnesia:match_object( - rabbit_reverse_route, - reverse_route( - #route{binding = #binding{queue_name = QueueName, - _ = '_'}}), - write)], - [begin - [X] = mnesia:read({rabbit_exchange, ExchangeName}), - ok = maybe_auto_delete(X) - end || ExchangeName <- Exchanges], - ok. - -delete_forward_routes(Route) -> - ok = mnesia:delete_object(rabbit_route, Route, write), - ok = mnesia:delete_object(rabbit_durable_route, Route, write). - -delete_transient_forward_routes(Route) -> - ok = mnesia:delete_object(rabbit_route, Route, write). - -exchanges_for_queue(QueueName) -> - MatchHead = reverse_route( - #route{binding = #binding{exchange_name = '$1', - queue_name = QueueName, - _ = '_'}}), - sets:to_list( - sets:from_list( - mnesia:select(rabbit_reverse_route, [{MatchHead, [], ['$1']}]))). - -contains(Table, MatchHead) -> - try - continue(mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read)) - catch exit:{aborted, {badarg, _}} -> - %% work around OTP-7025, which was fixed in R12B-1, by - %% falling back on a less efficient method - case mnesia:match_object(Table, MatchHead, read) of - [] -> false; - [_|_] -> true - end +publish(X = #exchange{name = XName}, Delivery) -> + rabbit_router:deliver( + route(Delivery, {queue:from_list([X]), XName, []}), + Delivery). + +route(Delivery, {WorkList, SeenXs, QNames}) -> + case queue:out(WorkList) of + {empty, _WorkList} -> + lists:usort(QNames); + {{value, X = #exchange{type = Type}}, WorkList1} -> + DstNames = process_alternate( + X, ((type_to_module(Type)):route(X, Delivery))), + route(Delivery, + lists:foldl(fun process_route/2, {WorkList1, SeenXs, QNames}, + DstNames)) end. -continue('$end_of_table') -> false; -continue({[_|_], _}) -> true; -continue({[], Continuation}) -> continue(mnesia:select(Continuation)). +process_alternate(#exchange{name = XName, arguments = Args}, []) -> + case rabbit_misc:r_arg(XName, exchange, Args, <<"alternate-exchange">>) of + undefined -> []; + AName -> [AName] + end; +process_alternate(_X, Results) -> + Results. 
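route/2 above replaces the per-type routing that used to live in this module: it walks a work queue of exchanges breadth-first, letting each exchange type's route/2 produce destination names, and process_alternate/2 falls back to the alternate-exchange argument only when an exchange routed to nothing; process_route/2 (just below) accumulates queue names and enqueues exchanges it has not seen before. A sketch of how that argument is supplied when declaring an exchange; the names are invented and the alternate exchange is assumed to exist already:

    AeArgs = [{<<"alternate-exchange">>, longstr, <<"my.ae">>}],
    XName  = rabbit_misc:r(<<"/">>, exchange, <<"my.direct">>),
    %% messages that route to no queue on <<"my.direct">> are retried
    %% through <<"my.ae">>
    _X = rabbit_exchange:declare(XName, direct, true, false, false, AeArgs).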
+ +process_route(#resource{kind = exchange} = XName, + {_WorkList, XName, _QNames} = Acc) -> + Acc; +process_route(#resource{kind = exchange} = XName, + {WorkList, #resource{kind = exchange} = SeenX, QNames}) -> + {case lookup(XName) of + {ok, X} -> queue:in(X, WorkList); + {error, not_found} -> WorkList + end, gb_sets:from_list([SeenX, XName]), QNames}; +process_route(#resource{kind = exchange} = XName, + {WorkList, SeenXs, QNames} = Acc) -> + case gb_sets:is_element(XName, SeenXs) of + true -> Acc; + false -> {case lookup(XName) of + {ok, X} -> queue:in(X, WorkList); + {error, not_found} -> WorkList + end, gb_sets:add_element(XName, SeenXs), QNames} + end; +process_route(#resource{kind = queue} = QName, + {WorkList, SeenXs, QNames}) -> + {WorkList, SeenXs, [QName | QNames]}. -call_with_exchange(Exchange, Fun) -> +call_with_exchange(XName, Fun, PrePostCommitFun) -> rabbit_misc:execute_mnesia_transaction( - fun() -> case mnesia:read({rabbit_exchange, Exchange}) of + fun () -> case mnesia:read({rabbit_exchange, XName}) of [] -> {error, not_found}; [X] -> Fun(X) end - end). - -call_with_exchange_and_queue(Exchange, Queue, Fun) -> - rabbit_misc:execute_mnesia_transaction( - fun() -> case {mnesia:read({rabbit_exchange, Exchange}), - mnesia:read({rabbit_queue, Queue})} of - {[X], [Q]} -> Fun(X, Q); - {[ ], [_]} -> {error, exchange_not_found}; - {[_], [ ]} -> {error, queue_not_found}; - {[ ], [ ]} -> {error, exchange_and_queue_not_found} - end - end). - -add_binding(ExchangeName, QueueName, RoutingKey, Arguments) -> - binding_action( - ExchangeName, QueueName, RoutingKey, Arguments, - fun (X, Q, B) -> - if Q#amqqueue.durable and not(X#exchange.durable) -> - {error, durability_settings_incompatible}; - true -> ok = sync_binding(B, Q#amqqueue.durable, - fun mnesia:write/3) - end - end). - -delete_binding(ExchangeName, QueueName, RoutingKey, Arguments) -> - binding_action( - ExchangeName, QueueName, RoutingKey, Arguments, - fun (X, Q, B) -> - case mnesia:match_object(rabbit_route, #route{binding = B}, - write) of - [] -> {error, binding_not_found}; - _ -> ok = sync_binding(B, Q#amqqueue.durable, - fun mnesia:delete_object/3), - maybe_auto_delete(X) - end - end). + end, PrePostCommitFun). -binding_action(ExchangeName, QueueName, RoutingKey, Arguments, Fun) -> - call_with_exchange_and_queue( - ExchangeName, QueueName, - fun (X, Q) -> - Fun(X, Q, #binding{exchange_name = ExchangeName, - queue_name = QueueName, - key = RoutingKey, - args = sort_arguments(Arguments)}) +delete(XName, IfUnused) -> + call_with_exchange( + XName, + case IfUnused of + true -> fun conditional_delete/1; + false -> fun unconditional_delete/1 + end, + fun ({deleted, X, Bs, Deletions}, Tx) -> + ok = rabbit_binding:process_deletions( + rabbit_binding:add_deletion( + XName, {X, deleted, Bs}, Deletions), Tx); + (Error = {error, _InUseOrNotFound}, _Tx) -> + Error end). -sync_binding(Binding, Durable, Fun) -> - ok = case Durable of - true -> Fun(rabbit_durable_route, - #route{binding = Binding}, write); - false -> ok - end, - {Route, ReverseRoute} = route_with_reverse(Binding), - ok = Fun(rabbit_route, Route, write), - ok = Fun(rabbit_reverse_route, ReverseRoute, write), - ok. 
- -list_bindings(VHostPath) -> - [{ExchangeName, QueueName, RoutingKey, Arguments} || - #route{binding = #binding{ - exchange_name = ExchangeName, - key = RoutingKey, - queue_name = QueueName, - args = Arguments}} - <- mnesia:dirty_match_object( - rabbit_route, - #route{binding = #binding{ - exchange_name = rabbit_misc:r(VHostPath, exchange), - _ = '_'}, - _ = '_'})]. - -route_with_reverse(#route{binding = Binding}) -> - route_with_reverse(Binding); -route_with_reverse(Binding = #binding{}) -> - Route = #route{binding = Binding}, - {Route, reverse_route(Route)}. - -reverse_route(#route{binding = Binding}) -> - #reverse_route{reverse_binding = reverse_binding(Binding)}; - -reverse_route(#reverse_route{reverse_binding = Binding}) -> - #route{binding = reverse_binding(Binding)}. - -reverse_binding(#reverse_binding{exchange_name = Exchange, - queue_name = Queue, - key = Key, - args = Args}) -> - #binding{exchange_name = Exchange, - queue_name = Queue, - key = Key, - args = Args}; - -reverse_binding(#binding{exchange_name = Exchange, - queue_name = Queue, - key = Key, - args = Args}) -> - #reverse_binding{exchange_name = Exchange, - queue_name = Queue, - key = Key, - args = Args}. - -default_headers_match_kind() -> all. - -parse_x_match(<<"all">>) -> all; -parse_x_match(<<"any">>) -> any; -parse_x_match(Other) -> - rabbit_log:warning("Invalid x-match field value ~p; expected all or any", - [Other]), - default_headers_match_kind(). - -%% Horrendous matching algorithm. Depends for its merge-like -%% (linear-time) behaviour on the lists:keysort (sort_arguments) that -%% route/3 and {add,delete}_binding/4 do. -%% -%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY. -%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -%% -headers_match(Pattern, Data) -> - MatchKind = case lists:keysearch(<<"x-match">>, 1, Pattern) of - {value, {_, longstr, MK}} -> parse_x_match(MK); - {value, {_, Type, MK}} -> - rabbit_log:warning("Invalid x-match field type ~p " - "(value ~p); expected longstr", - [Type, MK]), - default_headers_match_kind(); - _ -> default_headers_match_kind() - end, - headers_match(Pattern, Data, true, false, MatchKind). - -headers_match([], _Data, AllMatch, _AnyMatch, all) -> - AllMatch; -headers_match([], _Data, _AllMatch, AnyMatch, any) -> - AnyMatch; -headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data, - AllMatch, AnyMatch, MatchKind) -> - headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind); -headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) -> - headers_match([], [], false, AnyMatch, MatchKind); -headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], - AllMatch, AnyMatch, MatchKind) when PK > DK -> - headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind); -headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], - _AllMatch, AnyMatch, MatchKind) when PK < DK -> - headers_match(PRest, Data, false, AnyMatch, MatchKind); -headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], - AllMatch, AnyMatch, MatchKind) when PK == DK -> - {AllMatch1, AnyMatch1} = - if - %% It's not properly specified, but a "no value" in a - %% pattern field is supposed to mean simple presence of - %% the corresponding data field. I've interpreted that to - %% mean a type of "void" for the pattern field. - PT == void -> {AllMatch, true}; - %% Similarly, it's not specified, but I assume that a - %% mismatched type causes a mismatched value. 
- PT =/= DT -> {false, AnyMatch}; - PV == DV -> {AllMatch, true}; - true -> {false, AnyMatch} - end, - headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). - -split_topic_key(Key) -> - {ok, KeySplit} = regexp:split(binary_to_list(Key), "\\."), - KeySplit. - -topic_matches(PatternKey, RoutingKey) -> - P = split_topic_key(PatternKey), - R = split_topic_key(RoutingKey), - topic_matches1(P, R). - -topic_matches1(["#"], _R) -> - true; -topic_matches1(["#" | PTail], R) -> - last_topic_match(PTail, [], lists:reverse(R)); -topic_matches1([], []) -> - true; -topic_matches1(["*" | PatRest], [_ | ValRest]) -> - topic_matches1(PatRest, ValRest); -topic_matches1([PatElement | PatRest], [ValElement | ValRest]) when PatElement == ValElement -> - topic_matches1(PatRest, ValRest); -topic_matches1(_, _) -> - false. - -last_topic_match(P, R, []) -> - topic_matches1(P, R); -last_topic_match(P, R, [BacktrackNext | BacktrackList]) -> - topic_matches1(P, R) or last_topic_match(P, [BacktrackNext | R], BacktrackList). - -delete(ExchangeName, _IfUnused = true) -> - call_with_exchange(ExchangeName, fun conditional_delete/1); -delete(ExchangeName, _IfUnused = false) -> - call_with_exchange(ExchangeName, fun unconditional_delete/1). - maybe_auto_delete(#exchange{auto_delete = false}) -> - ok; -maybe_auto_delete(Exchange = #exchange{auto_delete = true}) -> - conditional_delete(Exchange), - ok. + not_deleted; +maybe_auto_delete(#exchange{auto_delete = true} = X) -> + case conditional_delete(X) of + {error, in_use} -> not_deleted; + {deleted, X, [], Deletions} -> {deleted, Deletions} + end. -conditional_delete(Exchange = #exchange{name = ExchangeName}) -> - Match = #route{binding = #binding{exchange_name = ExchangeName, _ = '_'}}, - %% we need to check for durable routes here too in case a bunch of - %% routes to durable queues have been removed temporarily as a - %% result of a node failure - case contains(rabbit_route, Match) orelse contains(rabbit_durable_route, Match) of - false -> unconditional_delete(Exchange); +callback(#exchange{type = XType}, Fun, Args) -> + apply(type_to_module(XType), Fun, Args). + +conditional_delete(X = #exchange{name = XName}) -> + case rabbit_binding:has_for_source(XName) of + false -> unconditional_delete(X); true -> {error, in_use} end. -unconditional_delete(#exchange{name = ExchangeName}) -> - ok = delete_exchange_bindings(ExchangeName), - ok = mnesia:delete({rabbit_durable_exchange, ExchangeName}), - ok = mnesia:delete({rabbit_exchange, ExchangeName}). - -%%---------------------------------------------------------------------------- -%% EXTENDED API -%% These are API calls that are not used by the server internally, -%% they are exported for embedded clients to use - -%% This is currently used in mod_rabbit.erl (XMPP) and expects this to -%% return {QueueName, RoutingKey, Arguments} tuples -list_exchange_bindings(ExchangeName) -> - Route = #route{binding = #binding{exchange_name = ExchangeName, - _ = '_'}}, - [{QueueName, RoutingKey, Arguments} || - #route{binding = #binding{queue_name = QueueName, - key = RoutingKey, - args = Arguments}} - <- mnesia:dirty_match_object(rabbit_route, Route)]. - -% Refactoring is left as an exercise for the reader -list_queue_bindings(QueueName) -> - Route = #route{binding = #binding{queue_name = QueueName, - _ = '_'}}, - [{ExchangeName, RoutingKey, Arguments} || - #route{binding = #binding{exchange_name = ExchangeName, - key = RoutingKey, - args = Arguments}} - <- mnesia:dirty_match_object(rabbit_route, Route)]. 
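The headers-matching code removed above reappears essentially unchanged in rabbit_exchange_type_headers later in this patch. Its behaviour is easiest to see on a concrete pattern (binding arguments) and data (message headers); note that both tables must already be sorted by key, which rabbit_misc:sort_field_table/1 and the binding bookkeeping take care of. The values below are invented, and headers_match/2 itself is internal to the exchange type, so this illustrates the semantics rather than a callable API:

    Pattern = [{<<"format">>,  longstr, <<"pdf">>},
               {<<"type">>,    longstr, <<"report">>},
               {<<"x-match">>, longstr, <<"all">>}],
    Data    = [{<<"format">>,  longstr, <<"pdf">>},
               {<<"type">>,    longstr, <<"log">>}],
    %% x-match = all -> no match, because "type" differs
    %% x-match = any -> match, because "format" agrees
    %% pattern keys beginning with "x-" are skipped by the comparison itself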
+unconditional_delete(X = #exchange{name = XName}) -> + ok = mnesia:delete({rabbit_durable_exchange, XName}), + ok = mnesia:delete({rabbit_exchange, XName}), + Bindings = rabbit_binding:remove_for_source(XName), + {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange_type_direct.erl rabbitmq-server-2.3.1/src/rabbit_exchange_type_direct.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange_type_direct.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange_type_direct.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,49 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_exchange_type_direct). +-include("rabbit.hrl"). + +-behaviour(rabbit_exchange_type). + +-export([description/0, route/2]). +-export([validate/1, create/2, recover/2, delete/3, + add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-include("rabbit_exchange_type_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "exchange type direct"}, + {mfa, {rabbit_registry, register, + [exchange, <<"direct">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +description() -> + [{name, <<"direct">>}, + {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. + +route(#exchange{name = Name}, + #delivery{message = #basic_message{routing_key = RoutingKey}}) -> + rabbit_router:match_routing_key(Name, RoutingKey). + +validate(_X) -> ok. +create(_Tx, _X) -> ok. +recover(_X, _Bs) -> ok. +delete(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. +remove_bindings(_Tx, _X, _Bs) -> ok. +assert_args_equivalence(X, Args) -> + rabbit_exchange:assert_args_equivalence(X, Args). diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange_type.erl rabbitmq-server-2.3.1/src/rabbit_exchange_type.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange_type.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange_type.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,50 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_exchange_type). + +-export([behaviour_info/1]). 
+ +behaviour_info(callbacks) -> + [ + {description, 0}, + {route, 2}, + + %% called BEFORE declaration, to check args etc; may exit with #amqp_error{} + {validate, 1}, + + %% called after declaration when previously absent + {create, 2}, + + %% called when recovering + {recover, 2}, + + %% called after exchange deletion. + {delete, 3}, + + %% called after a binding has been added + {add_binding, 3}, + + %% called after bindings have been deleted. + {remove_bindings, 3}, + + %% called when comparing exchanges for equivalence - should return ok or + %% exit with #amqp_error{} + {assert_args_equivalence, 2} + + ]; +behaviour_info(_Other) -> + undefined. diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange_type_fanout.erl rabbitmq-server-2.3.1/src/rabbit_exchange_type_fanout.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange_type_fanout.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange_type_fanout.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,48 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_exchange_type_fanout). +-include("rabbit.hrl"). + +-behaviour(rabbit_exchange_type). + +-export([description/0, route/2]). +-export([validate/1, create/2, recover/2, delete/3, add_binding/3, + remove_bindings/3, assert_args_equivalence/2]). +-include("rabbit_exchange_type_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "exchange type fanout"}, + {mfa, {rabbit_registry, register, + [exchange, <<"fanout">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +description() -> + [{name, <<"fanout">>}, + {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. + +route(#exchange{name = Name}, _Delivery) -> + rabbit_router:match_routing_key(Name, '_'). + +validate(_X) -> ok. +create(_Tx, _X) -> ok. +recover(_X, _Bs) -> ok. +delete(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. +remove_bindings(_Tx, _X, _Bs) -> ok. +assert_args_equivalence(X, Args) -> + rabbit_exchange:assert_args_equivalence(X, Args). diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange_type_headers.erl rabbitmq-server-2.3.1/src/rabbit_exchange_type_headers.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange_type_headers.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange_type_headers.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,122 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. 
+%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_exchange_type_headers). +-include("rabbit.hrl"). +-include("rabbit_framing.hrl"). + +-behaviour(rabbit_exchange_type). + +-export([description/0, route/2]). +-export([validate/1, create/2, recover/2, delete/3, add_binding/3, + remove_bindings/3, assert_args_equivalence/2]). +-include("rabbit_exchange_type_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "exchange type headers"}, + {mfa, {rabbit_registry, register, + [exchange, <<"headers">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +-ifdef(use_specs). +-spec(headers_match/2 :: (rabbit_framing:amqp_table(), + rabbit_framing:amqp_table()) -> boolean()). +-endif. + +description() -> + [{name, <<"headers">>}, + {description, <<"AMQP headers exchange, as per the AMQP specification">>}]. + +route(#exchange{name = Name}, + #delivery{message = #basic_message{content = Content}}) -> + Headers = case (Content#content.properties)#'P_basic'.headers of + undefined -> []; + H -> rabbit_misc:sort_field_table(H) + end, + rabbit_router:match_bindings( + Name, fun (#binding{args = Spec}) -> headers_match(Spec, Headers) end). + +default_headers_match_kind() -> all. + +parse_x_match(<<"all">>) -> all; +parse_x_match(<<"any">>) -> any; +parse_x_match(Other) -> + rabbit_log:warning("Invalid x-match field value ~p; expected all or any", + [Other]), + default_headers_match_kind(). + +%% Horrendous matching algorithm. Depends for its merge-like +%% (linear-time) behaviour on the lists:keysort +%% (rabbit_misc:sort_field_table) that route/1 and +%% rabbit_binding:{add,remove}/2 do. +%% +%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY. +%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +%% +headers_match(Pattern, Data) -> + MatchKind = case lists:keysearch(<<"x-match">>, 1, Pattern) of + {value, {_, longstr, MK}} -> parse_x_match(MK); + {value, {_, Type, MK}} -> + rabbit_log:warning("Invalid x-match field type ~p " + "(value ~p); expected longstr", + [Type, MK]), + default_headers_match_kind(); + _ -> default_headers_match_kind() + end, + headers_match(Pattern, Data, true, false, MatchKind). + +headers_match([], _Data, AllMatch, _AnyMatch, all) -> + AllMatch; +headers_match([], _Data, _AllMatch, AnyMatch, any) -> + AnyMatch; +headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data, + AllMatch, AnyMatch, MatchKind) -> + headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind); +headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) -> + headers_match([], [], false, AnyMatch, MatchKind); +headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], + AllMatch, AnyMatch, MatchKind) when PK > DK -> + headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind); +headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], + _AllMatch, AnyMatch, MatchKind) when PK < DK -> + headers_match(PRest, Data, false, AnyMatch, MatchKind); +headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], + AllMatch, AnyMatch, MatchKind) when PK == DK -> + {AllMatch1, AnyMatch1} = + if + %% It's not properly specified, but a "no value" in a + %% pattern field is supposed to mean simple presence of + %% the corresponding data field. I've interpreted that to + %% mean a type of "void" for the pattern field. 
+ PT == void -> {AllMatch, true}; + %% Similarly, it's not specified, but I assume that a + %% mismatched type causes a mismatched value. + PT =/= DT -> {false, AnyMatch}; + PV == DV -> {AllMatch, true}; + true -> {false, AnyMatch} + end, + headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). + +validate(_X) -> ok. +create(_Tx, _X) -> ok. +recover(_X, _Bs) -> ok. +delete(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. +remove_bindings(_Tx, _X, _Bs) -> ok. +assert_args_equivalence(X, Args) -> + rabbit_exchange:assert_args_equivalence(X, Args). diff -Nru rabbitmq-server-1.7.2/src/rabbit_exchange_type_topic.erl rabbitmq-server-2.3.1/src/rabbit_exchange_type_topic.erl --- rabbitmq-server-1.7.2/src/rabbit_exchange_type_topic.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_exchange_type_topic.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,88 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_exchange_type_topic). +-include("rabbit.hrl"). + +-behaviour(rabbit_exchange_type). + +-export([description/0, route/2]). +-export([validate/1, create/2, recover/2, delete/3, add_binding/3, + remove_bindings/3, assert_args_equivalence/2]). +-include("rabbit_exchange_type_spec.hrl"). + +-rabbit_boot_step({?MODULE, + [{description, "exchange type topic"}, + {mfa, {rabbit_registry, register, + [exchange, <<"topic">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +-export([topic_matches/2]). + +-ifdef(use_specs). + +-spec(topic_matches/2 :: (binary(), binary()) -> boolean()). + +-endif. + +description() -> + [{name, <<"topic">>}, + {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. + +route(#exchange{name = Name}, + #delivery{message = #basic_message{routing_key = RoutingKey}}) -> + rabbit_router:match_bindings(Name, + fun (#binding{key = BindingKey}) -> + topic_matches(BindingKey, RoutingKey) + end). + +split_topic_key(Key) -> + string:tokens(binary_to_list(Key), "."). + +topic_matches(PatternKey, RoutingKey) -> + P = split_topic_key(PatternKey), + R = split_topic_key(RoutingKey), + topic_matches1(P, R). + +topic_matches1(["#"], _R) -> + true; +topic_matches1(["#" | PTail], R) -> + last_topic_match(PTail, [], lists:reverse(R)); +topic_matches1([], []) -> + true; +topic_matches1(["*" | PatRest], [_ | ValRest]) -> + topic_matches1(PatRest, ValRest); +topic_matches1([PatElement | PatRest], [ValElement | ValRest]) + when PatElement == ValElement -> + topic_matches1(PatRest, ValRest); +topic_matches1(_, _) -> + false. + +last_topic_match(P, R, []) -> + topic_matches1(P, R); +last_topic_match(P, R, [BacktrackNext | BacktrackList]) -> + topic_matches1(P, R) or + last_topic_match(P, [BacktrackNext | R], BacktrackList). + +validate(_X) -> ok. +create(_Tx, _X) -> ok. +recover(_X, _Bs) -> ok. +delete(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. +remove_bindings(_Tx, _X, _Bs) -> ok. 
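topic_matches/2 above splits both keys on ".", then treats "*" as exactly one word and "#" as zero or more words, with last_topic_match/3 backtracking when "#" appears in the middle of a pattern. Some illustrative cases (the routing keys are invented):

    true  = rabbit_exchange_type_topic:topic_matches(<<"a.#">>, <<"a.b.c">>),
    false = rabbit_exchange_type_topic:topic_matches(<<"a.*">>, <<"a.b.c">>),
    true  = rabbit_exchange_type_topic:topic_matches(<<"*.b">>, <<"a.b">>),
    true  = rabbit_exchange_type_topic:topic_matches(<<"#.c">>, <<"a.b.c">>),
    false = rabbit_exchange_type_topic:topic_matches(<<"a.b">>, <<"a.b.c">>).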
+assert_args_equivalence(X, Args) -> + rabbit_exchange:assert_args_equivalence(X, Args). diff -Nru rabbitmq-server-1.7.2/src/rabbit_framing_channel.erl rabbitmq-server-2.3.1/src/rabbit_framing_channel.erl --- rabbitmq-server-1.7.2/src/rabbit_framing_channel.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_framing_channel.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_framing_channel). --include("rabbit.hrl"). - --export([start_link/2, process/2, shutdown/1]). - -%% internal --export([mainloop/1]). - -%%-------------------------------------------------------------------- - -start_link(StartFun, StartArgs) -> - spawn_link( - fun () -> - %% we trap exits so that a normal termination of the - %% channel or reader process terminates us too. - process_flag(trap_exit, true), - mainloop(apply(StartFun, StartArgs)) - end). - -process(Pid, Frame) -> - Pid ! {frame, Frame}, - ok. - -shutdown(Pid) -> - Pid ! terminate, - ok. - -%%-------------------------------------------------------------------- - -read_frame(ChannelPid) -> - receive - %% converting the exit signal into one of our own ensures that - %% the reader sees the right pid (i.e. ours) when a channel - %% exits. Similarly in the other direction, though it is not - %% really relevant there since the channel is not specifically - %% watching out for reader exit signals. - {'EXIT', _Pid, Reason} -> exit(Reason); - {frame, Frame} -> Frame; - terminate -> rabbit_channel:shutdown(ChannelPid), - read_frame(ChannelPid); - Msg -> exit({unexpected_message, Msg}) - end. - -mainloop(ChannelPid) -> - {method, MethodName, FieldsBin} = read_frame(ChannelPid), - Method = rabbit_framing:decode_method_fields(MethodName, FieldsBin), - case rabbit_framing:method_has_content(MethodName) of - true -> rabbit_channel:do(ChannelPid, Method, - collect_content(ChannelPid, MethodName)); - false -> rabbit_channel:do(ChannelPid, Method) - end, - ?MODULE:mainloop(ChannelPid). 
- -collect_content(ChannelPid, MethodName) -> - {ClassId, _MethodId} = rabbit_framing:method_id(MethodName), - case read_frame(ChannelPid) of - {content_header, HeaderClassId, 0, BodySize, PropertiesBin} -> - if HeaderClassId == ClassId -> - Payload = collect_content_payload(ChannelPid, BodySize, []), - #content{class_id = ClassId, - properties = none, - properties_bin = PropertiesBin, - payload_fragments_rev = Payload}; - true -> - rabbit_misc:protocol_error( - command_invalid, - "expected content header for class ~w, " - "got one for class ~w instead", - [ClassId, HeaderClassId]) - end; - _ -> - rabbit_misc:protocol_error( - command_invalid, - "expected content header for class ~w, " - "got non content header frame instead", - [ClassId]) - end. - -collect_content_payload(_ChannelPid, 0, Acc) -> - Acc; -collect_content_payload(ChannelPid, RemainingByteCount, Acc) -> - case read_frame(ChannelPid) of - {content_body, FragmentBin} -> - collect_content_payload(ChannelPid, - RemainingByteCount - size(FragmentBin), - [FragmentBin | Acc]); - _ -> - rabbit_misc:protocol_error( - command_invalid, - "expected content body, got non content body frame instead", - []) - end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_framing.erl rabbitmq-server-2.3.1/src/rabbit_framing.erl --- rabbitmq-server-1.7.2/src/rabbit_framing.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_framing.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,49 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +%% TODO auto-generate + +-module(rabbit_framing). + +-ifdef(use_specs). + +-export_type([protocol/0, + amqp_field_type/0, amqp_property_type/0, + amqp_table/0, amqp_array/0, amqp_value/0, + amqp_method_name/0, amqp_method/0, amqp_method_record/0, + amqp_method_field_name/0, amqp_property_record/0, + amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]). + +-type(protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'). + +-define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T | + rabbit_framing_amqp_0_9_1:T)). + +-?protocol_type(amqp_field_type()). +-?protocol_type(amqp_property_type()). +-?protocol_type(amqp_table()). +-?protocol_type(amqp_array()). +-?protocol_type(amqp_value()). +-?protocol_type(amqp_method_name()). +-?protocol_type(amqp_method()). +-?protocol_type(amqp_method_record()). +-?protocol_type(amqp_method_field_name()). +-?protocol_type(amqp_property_record()). +-?protocol_type(amqp_exception()). +-?protocol_type(amqp_exception_code()). +-?protocol_type(amqp_class_id()). + +-endif. 
diff -Nru rabbitmq-server-1.7.2/src/rabbit_guid.erl rabbitmq-server-2.3.1/src/rabbit_guid.erl --- rabbitmq-server-1.7.2/src/rabbit_guid.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_guid.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,45 +1,28 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_guid). --include("rabbit.hrl"). - -behaviour(gen_server). -export([start_link/0]). -export([guid/0, string_guid/1, binstring_guid/1]). --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). -define(SERVER, ?MODULE). -define(SERIAL_FILENAME, "rabbit_serial"). @@ -50,7 +33,11 @@ -ifdef(use_specs). --spec(start_link/0 :: () -> {'ok', pid()} | 'ignore' | {'error', any()}). +-export_type([guid/0]). + +-type(guid() :: binary()). + +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). -spec(guid/0 :: () -> guid()). -spec(string_guid/1 :: (any()) -> string()). -spec(binstring_guid/1 :: (any()) -> binary()). @@ -67,7 +54,7 @@ Filename = filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME), Serial = case rabbit_misc:read_term_file(Filename) of {ok, [Num]} -> Num; - {error, enoent} -> rabbit_persister:serial(); + {error, enoent} -> 0; {error, Reason} -> throw({error, {cannot_read_serial_file, Filename, Reason}}) end, @@ -78,7 +65,7 @@ end, Serial. -%% generate a guid that is monotonically increasing per process. +%% generate a GUID. 
%% %% The id is only unique within a single cluster and as long as the %% serial store hasn't been deleted. @@ -92,20 +79,18 @@ %% A persisted serial number, in combination with self/0 (which %% includes the node name) uniquely identifies a process in space %% and time. We combine that with a process-local counter to give - %% us a GUID that is monotonically increasing per process. + %% us a GUID. G = case get(guid) of undefined -> {{gen_server:call(?SERVER, serial, infinity), self()}, 0}; {S, I} -> {S, I+1} end, put(guid, G), - G. + erlang:md5(term_to_binary(G)). -%% generate a readable string representation of a guid. Note that any -%% monotonicity of the guid is not preserved in the encoding. +%% generate a readable string representation of a GUID. string_guid(Prefix) -> - Prefix ++ "-" ++ base64:encode_to_string( - erlang:md5(term_to_binary(guid()))). + Prefix ++ "-" ++ base64:encode_to_string(guid()). binstring_guid(Prefix) -> list_to_binary(string_guid(Prefix)). diff -Nru rabbitmq-server-1.7.2/src/rabbit_heartbeat.erl rabbitmq-server-2.3.1/src/rabbit_heartbeat.erl --- rabbitmq-server-1.7.2/src/rabbit_heartbeat.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_heartbeat.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,100 +1,149 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_heartbeat). --export([start_heartbeat/2]). +-export([start_heartbeat_sender/3, start_heartbeat_receiver/3, + start_heartbeat_fun/1, pause_monitor/1, resume_monitor/1]). 
-start_heartbeat(_Sock, 0) -> - none; -start_heartbeat(Sock, TimeoutSec) -> - Parent = self(), - %% we check for incoming data every interval, and time out after - %% two checks with no change. As a result we will time out between - %% 2 and 3 intervals after the last data has been received. - spawn_link(fun () -> heartbeater(Sock, TimeoutSec * 1000, - recv_oct, 1, - fun () -> - Parent ! timeout, - stop - end, - erlang:monitor(process, Parent)) end), +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([heartbeaters/0]). +-export_type([start_heartbeat_fun/0]). + +-type(heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}). + +-type(heartbeat_callback() :: fun (() -> any())). + +-type(start_heartbeat_fun() :: + fun((rabbit_net:socket(), non_neg_integer(), heartbeat_callback(), + non_neg_integer(), heartbeat_callback()) -> + no_return())). + +-spec(start_heartbeat_sender/3 :: + (rabbit_net:socket(), non_neg_integer(), heartbeat_callback()) -> + rabbit_types:ok(pid())). +-spec(start_heartbeat_receiver/3 :: + (rabbit_net:socket(), non_neg_integer(), heartbeat_callback()) -> + rabbit_types:ok(pid())). + +-spec(start_heartbeat_fun/1 :: + (pid()) -> start_heartbeat_fun()). + + +-spec(pause_monitor/1 :: (heartbeaters()) -> 'ok'). +-spec(resume_monitor/1 :: (heartbeaters()) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +start_heartbeat_sender(Sock, TimeoutSec, SendFun) -> %% the 'div 2' is there so that we don't end up waiting for nearly %% 2 * TimeoutSec before sending a heartbeat in the boundary case %% where the last message was sent just after a heartbeat. - spawn_link(fun () -> heartbeater(Sock, TimeoutSec * 1000 div 2, - send_oct, 0, - fun () -> - catch rabbit_net:send(Sock, rabbit_binary_generator:build_heartbeat_frame()), - continue - end, - erlang:monitor(process, Parent)) end), + heartbeater( + {Sock, TimeoutSec * 1000 div 2, send_oct, 0, + fun () -> + SendFun(), + continue + end}). + +start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun) -> + %% we check for incoming data every interval, and time out after + %% two checks with no change. As a result we will time out between + %% 2 and 3 intervals after the last data has been received. + heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1, fun () -> + ReceiveFun(), + stop + end}). + +start_heartbeat_fun(SupPid) -> + fun (Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) -> + {ok, Sender} = + start_heartbeater(SendTimeoutSec, SupPid, Sock, + SendFun, heartbeat_sender, + start_heartbeat_sender), + {ok, Receiver} = + start_heartbeater(ReceiveTimeoutSec, SupPid, Sock, + ReceiveFun, heartbeat_receiver, + start_heartbeat_receiver), + {Sender, Receiver} + end. + +pause_monitor({_Sender, none}) -> + ok; +pause_monitor({_Sender, Receiver}) -> + Receiver ! pause, ok. -%% Y-combinator, posted by Vladimir Sekissov to the Erlang mailing list -%% http://www.erlang.org/ml-archive/erlang-questions/200301/msg00053.html -y(X) -> - F = fun (P) -> X(fun (A) -> (P(P))(A) end) end, - F(F). 
- -heartbeater(Sock, TimeoutMillisec, StatName, Threshold, Handler, MonitorRef) -> - Heartbeat = - fun (F) -> - fun ({StatVal, SameCount}) -> - receive - {'DOWN', MonitorRef, process, _Object, _Info} -> ok; - Other -> exit({unexpected_message, Other}) - after TimeoutMillisec -> - case rabbit_net:getstat(Sock, [StatName]) of - {ok, [{StatName, NewStatVal}]} -> - if NewStatVal =/= StatVal -> - F({NewStatVal, 0}); - SameCount < Threshold -> - F({NewStatVal, SameCount + 1}); - true -> - case Handler() of - stop -> ok; - continue -> F({NewStatVal, 0}) - end - end; - {error, einval} -> - %% the socket is dead, most - %% likely because the - %% connection is being shut - %% down -> terminate - ok; - {error, Reason} -> - exit({cannot_get_socket_stats, Reason}) - end - end - end - end, - (y(Heartbeat))({0, 0}). +resume_monitor({_Sender, none}) -> + ok; +resume_monitor({_Sender, Receiver}) -> + Receiver ! resume, + ok. + +%%---------------------------------------------------------------------------- +start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback) -> + {ok, none}; +start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback) -> + supervisor2:start_child( + SupPid, {Name, + {rabbit_heartbeat, Callback, + [Sock, TimeoutSec, TimeoutFun]}, + transient, ?MAX_WAIT, worker, [rabbit_heartbeat]}). + +heartbeater(Params) -> + {ok, proc_lib:spawn_link(fun () -> heartbeater(Params, {0, 0}) end)}. + +heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params, + {StatVal, SameCount}) -> + Recurse = fun (V) -> heartbeater(Params, V) end, + receive + pause -> + receive + resume -> + Recurse({0, 0}); + Other -> + exit({unexpected_message, Other}) + end; + Other -> + exit({unexpected_message, Other}) + after TimeoutMillisec -> + case rabbit_net:getstat(Sock, [StatName]) of + {ok, [{StatName, NewStatVal}]} -> + if NewStatVal =/= StatVal -> + Recurse({NewStatVal, 0}); + SameCount < Threshold -> + Recurse({NewStatVal, SameCount + 1}); + true -> + case Handler() of + stop -> ok; + continue -> Recurse({NewStatVal, 0}) + end + end; + {error, einval} -> + %% the socket is dead, most likely because the + %% connection is being shut down -> terminate + ok; + {error, Reason} -> + exit({cannot_get_socket_stats, Reason}) + end + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_hooks.erl rabbitmq-server-2.3.1/src/rabbit_hooks.erl --- rabbitmq-server-1.7.2/src/rabbit_hooks.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_hooks.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_hooks). - --export([start/0]). --export([subscribe/3, unsubscribe/2, trigger/2, notify_remote/5]). - --define(TableName, rabbit_hooks). - --ifdef(use_specs). - --spec(start/0 :: () -> 'ok'). --spec(subscribe/3 :: (atom(), atom(), {atom(), atom(), list()}) -> 'ok'). --spec(unsubscribe/2 :: (atom(), atom()) -> 'ok'). --spec(trigger/2 :: (atom(), list()) -> 'ok'). --spec(notify_remote/5 :: (atom(), atom(), list(), pid(), list()) -> 'ok'). - --endif. - -start() -> - ets:new(?TableName, [bag, public, named_table]), - ok. - -subscribe(Hook, HandlerName, Handler) -> - ets:insert(?TableName, {Hook, HandlerName, Handler}), - ok. - -unsubscribe(Hook, HandlerName) -> - ets:match_delete(?TableName, {Hook, HandlerName, '_'}), - ok. - -trigger(Hook, Args) -> - Hooks = ets:lookup(?TableName, Hook), - [case catch apply(M, F, [Hook, Name, Args | A]) of - {'EXIT', Reason} -> - rabbit_log:warning("Failed to execute handler ~p for hook ~p: ~p", - [Name, Hook, Reason]); - _ -> ok - end || {_, Name, {M, F, A}} <- Hooks], - ok. - -notify_remote(Hook, HandlerName, Args, Pid, PidArgs) -> - Pid ! {rabbitmq_hook, [Hook, HandlerName, Args | PidArgs]}, - ok. diff -Nru rabbitmq-server-1.7.2/src/rabbit_limiter.erl rabbitmq-server-2.3.1/src/rabbit_limiter.erl --- rabbitmq-server-1.7.2/src/rabbit_limiter.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_limiter.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. 
Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_limiter). @@ -34,10 +19,10 @@ -behaviour(gen_server2). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, - handle_info/2]). --export([start_link/2, shutdown/1]). + handle_info/2, prioritise_call/3]). +-export([start_link/2]). -export([limit/2, can_send/3, ack/2, register/2, unregister/2]). --export([get_limit/1]). +-export([get_limit/1, block/1, unblock/1, is_blocked/1]). %%---------------------------------------------------------------------------- @@ -45,14 +30,17 @@ -type(maybe_pid() :: pid() | 'undefined'). --spec(start_link/2 :: (pid(), non_neg_integer()) -> pid()). --spec(shutdown/1 :: (maybe_pid()) -> 'ok'). --spec(limit/2 :: (maybe_pid(), non_neg_integer()) -> 'ok'). +-spec(start_link/2 :: (pid(), non_neg_integer()) -> + rabbit_types:ok_pid_or_error()). +-spec(limit/2 :: (maybe_pid(), non_neg_integer()) -> 'ok' | 'stopped'). -spec(can_send/3 :: (maybe_pid(), pid(), boolean()) -> boolean()). -spec(ack/2 :: (maybe_pid(), non_neg_integer()) -> 'ok'). -spec(register/2 :: (maybe_pid(), pid()) -> 'ok'). -spec(unregister/2 :: (maybe_pid(), pid()) -> 'ok'). -spec(get_limit/1 :: (maybe_pid()) -> non_neg_integer()). +-spec(block/1 :: (maybe_pid()) -> 'ok'). +-spec(unblock/1 :: (maybe_pid()) -> 'ok' | 'stopped'). +-spec(is_blocked/1 :: (maybe_pid()) -> boolean()). -endif. @@ -60,6 +48,7 @@ -record(lim, {prefetch_count = 0, ch_pid, + blocked = false, queues = dict:new(), % QPid -> {MonitorRef, Notify} volume = 0}). %% 'Notify' is a boolean that indicates whether a queue should be @@ -71,19 +60,12 @@ %%---------------------------------------------------------------------------- start_link(ChPid, UnackedMsgCount) -> - {ok, Pid} = gen_server2:start_link(?MODULE, [ChPid, UnackedMsgCount], []), - Pid. - -shutdown(undefined) -> - ok; -shutdown(LimiterPid) -> - unlink(LimiterPid), - gen_server2:cast(LimiterPid, shutdown). + gen_server2:start_link(?MODULE, [ChPid, UnackedMsgCount], []). limit(undefined, 0) -> ok; limit(LimiterPid, PrefetchCount) -> - gen_server2:cast(LimiterPid, {limit, PrefetchCount}). + gen_server2:call(LimiterPid, {limit, PrefetchCount}). %% Ask the limiter whether the queue can deliver a message without %% breaching a limit @@ -111,7 +93,22 @@ get_limit(Pid) -> rabbit_misc:with_exit_handler( fun () -> 0 end, - fun () -> gen_server2:pcall(Pid, 9, get_limit, infinity) end). + fun () -> gen_server2:call(Pid, get_limit, infinity) end). + +block(undefined) -> + ok; +block(LimiterPid) -> + gen_server2:call(LimiterPid, block, infinity). + +unblock(undefined) -> + ok; +unblock(LimiterPid) -> + gen_server2:call(LimiterPid, unblock, infinity). + +is_blocked(undefined) -> + false; +is_blocked(LimiterPid) -> + gen_server2:call(LimiterPid, is_blocked, infinity). %%---------------------------------------------------------------------------- %% gen_server callbacks @@ -120,29 +117,48 @@ init([ChPid, UnackedMsgCount]) -> {ok, #lim{ch_pid = ChPid, volume = UnackedMsgCount}}. +prioritise_call(get_limit, _From, _State) -> 9; +prioritise_call(_Msg, _From, _State) -> 0. 
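As a sketch only (not part of the diff, and assuming the can_send/3 client function, whose body is elided from this hunk, behaves as its spec suggests), a channel drives the reworked limiter API above roughly like this:

    {ok, Lim} = rabbit_limiter:start_link(ChPid, 0),
    ok    = rabbit_limiter:limit(Lim, 10),            %% basic.qos prefetch-count=10
    true  = rabbit_limiter:can_send(Lim, QPid, true), %% still below the limit
    ok    = rabbit_limiter:ack(Lim, 1),               %% consumer acknowledged one message
    ok    = rabbit_limiter:block(Lim),                %% channel.flow active=false
    false = rabbit_limiter:can_send(Lim, QPid, true), %% blocked overrides the count
    ok    = rabbit_limiter:unblock(Lim).

The blocked flag introduced in this version lets channel.flow pause deliveries independently of the prefetch count, and unblock/1 re-runs maybe_notify/2 so that any queues parked while blocked get woken up.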
+ +handle_call({can_send, _QPid, _AckRequired}, _From, + State = #lim{blocked = true}) -> + {reply, false, State}; handle_call({can_send, QPid, AckRequired}, _From, State = #lim{volume = Volume}) -> case limit_reached(State) of true -> {reply, false, limit_queue(QPid, State)}; - false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1; - true -> Volume - end}} + false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1; + true -> Volume + end}} end; handle_call(get_limit, _From, State = #lim{prefetch_count = PrefetchCount}) -> - {reply, PrefetchCount, State}. + {reply, PrefetchCount, State}; -handle_cast(shutdown, State) -> - {stop, normal, State}; +handle_call({limit, PrefetchCount}, _From, State) -> + case maybe_notify(State, State#lim{prefetch_count = PrefetchCount}) of + {cont, State1} -> {reply, ok, State1}; + {stop, State1} -> {stop, normal, stopped, State1} + end; -handle_cast({limit, PrefetchCount}, State) -> - {noreply, maybe_notify(State, State#lim{prefetch_count = PrefetchCount})}; +handle_call(block, _From, State) -> + {reply, ok, State#lim{blocked = true}}; + +handle_call(unblock, _From, State) -> + case maybe_notify(State, State#lim{blocked = false}) of + {cont, State1} -> {reply, ok, State1}; + {stop, State1} -> {stop, normal, stopped, State1} + end; + +handle_call(is_blocked, _From, State) -> + {reply, blocked(State), State}. handle_cast({ack, Count}, State = #lim{volume = Volume}) -> NewVolume = if Volume == 0 -> 0; true -> Volume - Count end, - {noreply, maybe_notify(State, State#lim{volume = NewVolume})}; + {cont, State1} = maybe_notify(State, State#lim{volume = NewVolume}), + {noreply, State1}; handle_cast({register, QPid}, State) -> {noreply, remember_queue(QPid, State)}; @@ -164,14 +180,21 @@ %%---------------------------------------------------------------------------- maybe_notify(OldState, NewState) -> - case limit_reached(OldState) andalso not(limit_reached(NewState)) of - true -> notify_queues(NewState); - false -> NewState + case (limit_reached(OldState) orelse blocked(OldState)) andalso + not (limit_reached(NewState) orelse blocked(NewState)) of + true -> NewState1 = notify_queues(NewState), + {case NewState1#lim.prefetch_count of + 0 -> stop; + _ -> cont + end, NewState1}; + false -> {cont, NewState} end. limit_reached(#lim{prefetch_count = Limit, volume = Volume}) -> Limit =/= 0 andalso Volume >= Limit. +blocked(#lim{blocked = Blocked}) -> Blocked. + remember_queue(QPid, State = #lim{queues = Queues}) -> case dict:is_key(QPid, Queues) of false -> MRef = erlang:monitor(process, QPid), diff -Nru rabbitmq-server-1.7.2/src/rabbit_load.erl rabbitmq-server-2.3.1/src/rabbit_load.erl --- rabbitmq-server-1.7.2/src/rabbit_load.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_load.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_load). - --export([local_load/0, remote_loads/0, pick/0]). - --define(FUDGE_FACTOR, 0.98). --define(TIMEOUT, 100). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --type(erlang_node() :: atom()). --type(load() :: {{non_neg_integer(), integer() | 'unknown'}, erlang_node()}). --spec(local_load/0 :: () -> load()). --spec(remote_loads/0 :: () -> [load()]). --spec(pick/0 :: () -> erlang_node()). - --endif. - -%%---------------------------------------------------------------------------- - -local_load() -> - LoadAvg = case whereis(cpu_sup) of - undefined -> unknown; - _ -> case cpu_sup:avg1() of - L when is_integer(L) -> L; - {error, timeout} -> unknown - end - end, - {{statistics(run_queue), LoadAvg}, node()}. - -remote_loads() -> - {ResL, _BadNodes} = - rpc:multicall(nodes(), ?MODULE, local_load, [], ?TIMEOUT), - ResL. - -pick() -> - RemoteLoads = remote_loads(), - {{RunQ, LoadAvg}, Node} = local_load(), - %% add bias towards current node; we rely on Erlang's term order - %% of SomeFloat < local_unknown < unknown. - AdjustedLoadAvg = case LoadAvg of - unknown -> local_unknown; - _ -> LoadAvg * ?FUDGE_FACTOR - end, - Loads = [{{RunQ, AdjustedLoadAvg}, Node} | RemoteLoads], - {_, SelectedNode} = lists:min(Loads), - SelectedNode. diff -Nru rabbitmq-server-1.7.2/src/rabbit_log.erl rabbitmq-server-2.3.1/src/rabbit_log.erl --- rabbitmq-server-1.7.2/src/rabbit_log.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_log.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_log). @@ -41,16 +26,13 @@ -export([debug/1, debug/2, message/4, info/1, info/2, warning/1, warning/2, error/1, error/2]). --import(io). --import(error_logger). - -define(SERVER, ?MODULE). %%---------------------------------------------------------------------------- -ifdef(use_specs). --spec(start_link/0 :: () -> {'ok', pid()} | 'ignore' | {'error', any()}). +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). -spec(debug/1 :: (string()) -> 'ok'). -spec(debug/2 :: (string(), [any()]) -> 'ok'). -spec(info/1 :: (string()) -> 'ok'). diff -Nru rabbitmq-server-1.7.2/src/rabbit_memory_monitor.erl rabbitmq-server-2.3.1/src/rabbit_memory_monitor.erl --- rabbitmq-server-1.7.2/src/rabbit_memory_monitor.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_memory_monitor.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,280 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + + +%% This module handles the node-wide memory statistics. +%% It receives statistics from all queues, counts the desired +%% queue length (in seconds), and sends this information back to +%% queues. + +-module(rabbit_memory_monitor). + +-behaviour(gen_server2). + +-export([start_link/0, update/0, register/2, deregister/1, + report_ram_duration/2, stop/0]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-record(process, {pid, reported, sent, callback, monitor}). + +-record(state, {timer, %% 'internal_update' timer + queue_durations, %% ets #process + queue_duration_sum, %% sum of all queue_durations + queue_duration_count, %% number of elements in sum + memory_limit, %% how much memory we intend to use + desired_duration %% the desired queue duration + }). + +-define(SERVER, ?MODULE). +-define(DEFAULT_UPDATE_INTERVAL, 2500). +-define(TABLE_NAME, ?MODULE). + +%% Because we have a feedback loop here, we need to ensure that we +%% have some space for when the queues don't quite respond as fast as +%% we would like, or when there is buffering going on in other parts +%% of the system. 
In short, we aim to stay some distance away from +%% when the memory alarms will go off, which cause backpressure (of +%% some sort) on producers. Note that all other Thresholds are +%% relative to this scaling. +-define(MEMORY_LIMIT_SCALING, 0.4). + +-define(LIMIT_THRESHOLD, 0.5). %% don't limit queues when mem use is < this + +%% If all queues are pushed to disk (duration 0), then the sum of +%% their reported lengths will be 0. If memory then becomes available, +%% unless we manually intervene, the sum will remain 0, and the queues +%% will never get a non-zero duration. Thus when the mem use is < +%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT. +-define(SUM_INC_THRESHOLD, 0.95). +-define(SUM_INC_AMOUNT, 1.0). + +%% If user disabled vm_memory_monitor, let's assume 1GB of memory we can use. +-define(MEMORY_SIZE_FOR_DISABLED_VMM, 1073741824). + +-define(EPSILON, 0.000001). %% less than this and we clamp to 0 + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). +-spec(update/0 :: () -> 'ok'). +-spec(register/2 :: (pid(), {atom(),atom(),[any()]}) -> 'ok'). +-spec(deregister/1 :: (pid()) -> 'ok'). +-spec(report_ram_duration/2 :: + (pid(), float() | 'infinity') -> number() | 'infinity'). +-spec(stop/0 :: () -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- +%% Public API +%%---------------------------------------------------------------------------- + +start_link() -> + gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []). + +update() -> + gen_server2:cast(?SERVER, update). + +register(Pid, MFA = {_M, _F, _A}) -> + gen_server2:call(?SERVER, {register, Pid, MFA}, infinity). + +deregister(Pid) -> + gen_server2:cast(?SERVER, {deregister, Pid}). + +report_ram_duration(Pid, QueueDuration) -> + gen_server2:call(?SERVER, + {report_ram_duration, Pid, QueueDuration}, infinity). + +stop() -> + gen_server2:cast(?SERVER, stop). + +%%---------------------------------------------------------------------------- +%% Gen_server callbacks +%%---------------------------------------------------------------------------- + +init([]) -> + MemoryLimit = trunc(?MEMORY_LIMIT_SCALING * + (try + vm_memory_monitor:get_memory_limit() + catch + exit:{noproc, _} -> ?MEMORY_SIZE_FOR_DISABLED_VMM + end)), + + {ok, TRef} = timer:apply_interval(?DEFAULT_UPDATE_INTERVAL, + ?SERVER, update, []), + + Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]), + + {ok, internal_update( + #state { timer = TRef, + queue_durations = Ets, + queue_duration_sum = 0.0, + queue_duration_count = 0, + memory_limit = MemoryLimit, + desired_duration = infinity })}. 
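To make the constants above concrete (illustrative numbers, not part of the diff): with vm_memory_monitor disabled, init/1 falls back to the 1GB default and scales it by ?MEMORY_LIMIT_SCALING, so the monitor plans around roughly

    MemoryLimit = trunc(0.4 * 1073741824).   %% 429496729 bytes, about 410MB

and queues are only throttled once erlang:memory(total) climbs past ?LIMIT_THRESHOLD (half) of that figure; below it the monitor keeps handing out 'infinity' and queues keep everything in RAM.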
+ +handle_call({report_ram_duration, Pid, QueueDuration}, From, + State = #state { queue_duration_sum = Sum, + queue_duration_count = Count, + queue_durations = Durations, + desired_duration = SendDuration }) -> + + [Proc = #process { reported = PrevQueueDuration }] = + ets:lookup(Durations, Pid), + + gen_server2:reply(From, SendDuration), + + {Sum1, Count1} = + case {PrevQueueDuration, QueueDuration} of + {infinity, infinity} -> {Sum, Count}; + {infinity, _} -> {Sum + QueueDuration, Count + 1}; + {_, infinity} -> {Sum - PrevQueueDuration, Count - 1}; + {_, _} -> {Sum - PrevQueueDuration + QueueDuration, + Count} + end, + true = ets:insert(Durations, Proc #process { reported = QueueDuration, + sent = SendDuration }), + {noreply, State #state { queue_duration_sum = zero_clamp(Sum1), + queue_duration_count = Count1 }}; + +handle_call({register, Pid, MFA}, _From, + State = #state { queue_durations = Durations }) -> + MRef = erlang:monitor(process, Pid), + true = ets:insert(Durations, #process { pid = Pid, reported = infinity, + sent = infinity, callback = MFA, + monitor = MRef }), + {reply, ok, State}; + +handle_call(_Request, _From, State) -> + {noreply, State}. + +handle_cast(update, State) -> + {noreply, internal_update(State)}; + +handle_cast({deregister, Pid}, State) -> + {noreply, internal_deregister(Pid, true, State)}; + +handle_cast(stop, State) -> + {stop, normal, State}; + +handle_cast(_Request, State) -> + {noreply, State}. + +handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) -> + {noreply, internal_deregister(Pid, false, State)}; + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, #state { timer = TRef }) -> + timer:cancel(TRef), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + + +%%---------------------------------------------------------------------------- +%% Internal functions +%%---------------------------------------------------------------------------- + +zero_clamp(Sum) -> + case Sum < ?EPSILON of + true -> 0.0; + false -> Sum + end. + +internal_deregister(Pid, Demonitor, + State = #state { queue_duration_sum = Sum, + queue_duration_count = Count, + queue_durations = Durations }) -> + case ets:lookup(Durations, Pid) of + [] -> State; + [#process { reported = PrevQueueDuration, monitor = MRef }] -> + true = case Demonitor of + true -> erlang:demonitor(MRef); + false -> true + end, + {Sum1, Count1} = + case PrevQueueDuration of + infinity -> {Sum, Count}; + _ -> {zero_clamp(Sum - PrevQueueDuration), + Count - 1} + end, + true = ets:delete(Durations, Pid), + State #state { queue_duration_sum = Sum1, + queue_duration_count = Count1 } + end. 
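A rough worked example of the feedback computation in internal_update/1, which follows (illustrative numbers only):

    MemoryRatio = 0.75,                               %% erlang:memory(total) / MemoryLimit
    Sum = 6.0, Count = 3,                             %% three queues each reported ~2s of RAM
    Sum1 = Sum + ?SUM_INC_AMOUNT,                     %% 0.75 < ?SUM_INC_THRESHOLD, so bump to 7.0
    DesiredDuration = (Sum1 / Count) / MemoryRatio.   %% about 3.1 seconds

The busier memory gets, the shorter the duration handed back to queues. Only decreases are pushed to queues immediately; an increased duration reaches a queue the next time it calls report_ram_duration/2.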
+ +internal_update(State = #state { memory_limit = Limit, + queue_durations = Durations, + desired_duration = DesiredDurationAvg, + queue_duration_sum = Sum, + queue_duration_count = Count }) -> + MemoryRatio = erlang:memory(total) / Limit, + DesiredDurationAvg1 = + case MemoryRatio < ?LIMIT_THRESHOLD orelse Count == 0 of + true -> + infinity; + false -> + Sum1 = case MemoryRatio < ?SUM_INC_THRESHOLD of + true -> Sum + ?SUM_INC_AMOUNT; + false -> Sum + end, + (Sum1 / Count) / MemoryRatio + end, + State1 = State #state { desired_duration = DesiredDurationAvg1 }, + + %% only inform queues immediately if the desired duration has + %% decreased + case DesiredDurationAvg1 == infinity orelse + (DesiredDurationAvg /= infinity andalso + DesiredDurationAvg1 >= DesiredDurationAvg) of + true -> + ok; + false -> + true = + ets:foldl( + fun (Proc = #process { reported = QueueDuration, + sent = PrevSendDuration, + callback = {M, F, A} }, true) -> + case (case {QueueDuration, PrevSendDuration} of + {infinity, infinity} -> + true; + {infinity, D} -> + DesiredDurationAvg1 < D; + {D, infinity} -> + DesiredDurationAvg1 < D; + {D1, D2} -> + DesiredDurationAvg1 < + lists:min([D1,D2]) + end) of + true -> + ok = erlang:apply( + M, F, A ++ [DesiredDurationAvg1]), + ets:insert( + Durations, + Proc #process {sent = DesiredDurationAvg1}); + false -> + true + end + end, true, Durations) + end, + State1. diff -Nru rabbitmq-server-1.7.2/src/rabbit_misc.erl rabbitmq-server-2.3.1/src/rabbit_misc.erl --- rabbitmq-server-1.7.2/src/rabbit_misc.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_misc.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,136 +1,199 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. 
+%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_misc). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). + -include_lib("kernel/include/file.hrl"). -export([method_record_type/1, polite_pause/0, polite_pause/1]). -export([die/1, frame_error/2, amqp_error/4, - protocol_error/3, protocol_error/4]). --export([not_found/1]). --export([get_config/1, get_config/2, set_config/2]). + protocol_error/3, protocol_error/4, protocol_error/1]). +-export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). +-export([table_lookup/2]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). +-export([start_cover/1]). -export([throw_on_error/2, with_exit_handler/2, filter_exit_map/2]). --export([with_user/2, with_vhost/2, with_user_and_vhost/3]). +-export([with_user/2, with_user_and_vhost/3]). -export([execute_mnesia_transaction/1]). +-export([execute_mnesia_transaction/2]). +-export([execute_mnesia_tx_with_tail/1]). -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). --export([intersperse/2, upmap/2, map_in_order/2]). --export([table_foreach/2]). +-export([upmap/2, map_in_order/2]). +-export([table_fold/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). -export([format_stderr/2]). -export([start_applications/1, stop_applications/1]). -export([unfold/2, ceil/1, queue_fold/3]). +-export([sort_field_table/1]). -export([pid_to_string/1, string_to_pid/1]). -export([version_compare/2, version_compare/3]). - --import(mnesia). --import(lists). --import(cover). --import(disk_log). +-export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3, + unlink_and_capture_exit/1]). +-export([get_options/2]). +-export([all_module_attributes/1, build_acyclic_graph/3]). +-export([now_ms/0]). +-export([lock_file/1]). +-export([const_ok/1, const/1]). +-export([ntoa/1, ntoab/1]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --include_lib("kernel/include/inet.hrl"). +-export_type([resource_name/0, thunk/1, const/1]). --type(ok_or_error() :: 'ok' | {'error', any()}). +-type(ok_or_error() :: rabbit_types:ok_or_error(any())). +-type(thunk(T) :: fun(() -> T)). +-type(const(T) :: fun((any()) -> T)). +-type(resource_name() :: binary()). +-type(optdef() :: {flag, string()} | {option, string(), any()}). +-type(channel_or_connection_exit() + :: rabbit_types:channel_exit() | rabbit_types:connection_exit()). +-type(digraph_label() :: term()). +-type(graph_vertex_fun() :: + fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])). +-type(graph_edge_fun() :: + fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])). --spec(method_record_type/1 :: (tuple()) -> atom()). +-spec(method_record_type/1 :: (rabbit_framing:amqp_method_record()) + -> rabbit_framing:amqp_method_name()). -spec(polite_pause/0 :: () -> 'done'). -spec(polite_pause/1 :: (non_neg_integer()) -> 'done'). --spec(die/1 :: (atom()) -> no_return()). --spec(frame_error/2 :: (atom(), binary()) -> no_return()). --spec(amqp_error/4 :: (atom(), string(), [any()], atom()) -> amqp_error()). --spec(protocol_error/3 :: (atom(), string(), [any()]) -> no_return()). --spec(protocol_error/4 :: (atom(), string(), [any()], atom()) -> no_return()). 
--spec(not_found/1 :: (r(atom())) -> no_return()). --spec(get_config/1 :: (atom()) -> {'ok', any()} | not_found()). --spec(get_config/2 :: (atom(), A) -> A). --spec(set_config/2 :: (atom(), any()) -> 'ok'). --spec(dirty_read/1 :: ({atom(), any()}) -> {'ok', any()} | not_found()). --spec(r/3 :: (vhost() | r(atom()), K, resource_name()) -> - r(K) when is_subtype(K, atom())). --spec(r/2 :: (vhost(), K) -> #resource{virtual_host :: vhost(), - kind :: K, - name :: '_'} - when is_subtype(K, atom())). --spec(r_arg/4 :: (vhost() | r(atom()), K, amqp_table(), binary()) -> - undefined | r(K) when is_subtype(K, atom())). --spec(rs/1 :: (r(atom())) -> string()). +-spec(die/1 :: + (rabbit_framing:amqp_exception()) -> channel_or_connection_exit()). +-spec(frame_error/2 :: (rabbit_framing:amqp_method_name(), binary()) + -> rabbit_types:connection_exit()). +-spec(amqp_error/4 :: + (rabbit_framing:amqp_exception(), string(), [any()], + rabbit_framing:amqp_method_name()) + -> rabbit_types:amqp_error()). +-spec(protocol_error/3 :: (rabbit_framing:amqp_exception(), string(), [any()]) + -> channel_or_connection_exit()). +-spec(protocol_error/4 :: + (rabbit_framing:amqp_exception(), string(), [any()], + rabbit_framing:amqp_method_name()) -> channel_or_connection_exit()). +-spec(protocol_error/1 :: + (rabbit_types:amqp_error()) -> channel_or_connection_exit()). +-spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()). +-spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(), + rabbit_framing:amqp_table(), + rabbit_types:r(any()), [binary()]) -> + 'ok' | rabbit_types:connection_exit()). +-spec(dirty_read/1 :: + ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). +-spec(table_lookup/2 :: + (rabbit_framing:amqp_table(), binary()) + -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). +-spec(r/2 :: (rabbit_types:vhost(), K) + -> rabbit_types:r3(rabbit_types:vhost(), K, '_') + when is_subtype(K, atom())). +-spec(r/3 :: + (rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name()) + -> rabbit_types:r3(rabbit_types:vhost(), K, resource_name()) + when is_subtype(K, atom())). +-spec(r_arg/4 :: + (rabbit_types:vhost() | rabbit_types:r(atom()), K, + rabbit_framing:amqp_table(), binary()) + -> undefined | rabbit_types:r(K) + when is_subtype(K, atom())). +-spec(rs/1 :: (rabbit_types:r(atom())) -> string()). -spec(enable_cover/0 :: () -> ok_or_error()). +-spec(start_cover/1 :: ([{string(), string()} | string()]) -> 'ok'). -spec(report_cover/0 :: () -> 'ok'). --spec(enable_cover/1 :: (string()) -> ok_or_error()). --spec(report_cover/1 :: (string()) -> 'ok'). +-spec(enable_cover/1 :: ([file:filename() | atom()]) -> ok_or_error()). +-spec(report_cover/1 :: ([file:filename() | atom()]) -> 'ok'). -spec(throw_on_error/2 :: - (atom(), thunk({error, any()} | {ok, A} | A)) -> A). + (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A). -spec(with_exit_handler/2 :: (thunk(A), thunk(A)) -> A). -spec(filter_exit_map/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(with_user/2 :: (username(), thunk(A)) -> A). --spec(with_vhost/2 :: (vhost(), thunk(A)) -> A). --spec(with_user_and_vhost/3 :: (username(), vhost(), thunk(A)) -> A). +-spec(with_user/2 :: (rabbit_types:username(), thunk(A)) -> A). +-spec(with_user_and_vhost/3 :: + (rabbit_types:username(), rabbit_types:vhost(), thunk(A)) + -> A). -spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A). +-spec(execute_mnesia_transaction/2 :: + (thunk(A), fun ((A, boolean()) -> B)) -> B). 
+-spec(execute_mnesia_tx_with_tail/1 :: + (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))). -spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok'). --spec(makenode/1 :: ({string(), string()} | string()) -> erlang_node()). --spec(nodeparts/1 :: (erlang_node() | string()) -> {string(), string()}). +-spec(makenode/1 :: ({string(), string()} | string()) -> node()). +-spec(nodeparts/1 :: (node() | string()) -> {string(), string()}). -spec(cookie_hash/0 :: () -> string()). --spec(tcp_name/3 :: (atom(), ip_address(), ip_port()) -> atom()). --spec(intersperse/2 :: (A, [A]) -> [A]). +-spec(tcp_name/3 :: + (atom(), inet:ip_address(), rabbit_networking:ip_port()) + -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_foreach/2 :: (fun ((any()) -> any()), atom()) -> 'ok'). +-spec(table_fold/3 :: (fun ((any(), A) -> A), A, atom()) -> A). -spec(dirty_read_all/1 :: (atom()) -> [any()]). --spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> - 'ok' | 'aborted'). --spec(dirty_dump_log/1 :: (string()) -> ok_or_error()). --spec(read_term_file/1 :: (string()) -> {'ok', [any()]} | {'error', any()}). --spec(write_term_file/2 :: (string(), [any()]) -> ok_or_error()). --spec(append_file/2 :: (string(), string()) -> ok_or_error()). +-spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) + -> 'ok' | 'aborted'). +-spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()). +-spec(read_term_file/1 :: + (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())). +-spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()). +-spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). -spec(start_applications/1 :: ([atom()]) -> 'ok'). -spec(stop_applications/1 :: ([atom()]) -> 'ok'). -spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}). --spec(ceil/1 :: (number()) -> number()). +-spec(ceil/1 :: (number()) -> integer()). -spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue()) -> B). +-spec(sort_field_table/1 :: + (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()). -spec(pid_to_string/1 :: (pid()) -> string()). -spec(string_to_pid/1 :: (string()) -> pid()). +-spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt'). +-spec(version_compare/3 :: + (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt')) + -> boolean()). +-spec(recursive_delete/1 :: + ([file:filename()]) + -> rabbit_types:ok_or_error({file:filename(), any()})). +-spec(recursive_copy/2 :: + (file:filename(), file:filename()) + -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})). +-spec(dict_cons/3 :: (any(), any(), dict()) -> dict()). +-spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()). +-spec(unlink_and_capture_exit/1 :: (pid()) -> 'ok'). +-spec(get_options/2 :: ([optdef()], [string()]) + -> {[string()], [{string(), any()}]}). +-spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]). +-spec(build_acyclic_graph/3 :: + (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}]) + -> rabbit_types:ok_or_error2(digraph(), + {'vertex', 'duplicate', digraph:vertex()} | + {'edge', ({bad_vertex, digraph:vertex()} | + {bad_edge, [digraph:vertex()]}), + digraph:vertex(), digraph:vertex()})). +-spec(now_ms/0 :: () -> non_neg_integer()). 
+-spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). +-spec(const_ok/1 :: (any()) -> 'ok'). +-spec(const/1 :: (A) -> const(A)). +-spec(ntoa/1 :: (inet:ip_address()) -> string()). +-spec(ntoab/1 :: (inet:ip_address()) -> string()). -endif. @@ -161,24 +224,35 @@ protocol_error(Name, ExplanationFormat, Params, none). protocol_error(Name, ExplanationFormat, Params, Method) -> - exit(amqp_error(Name, ExplanationFormat, Params, Method)). + protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)). -not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]). +protocol_error(#amqp_error{} = Error) -> + exit(Error). -get_config(Key) -> - case dirty_read({rabbit_config, Key}) of - {ok, {rabbit_config, Key, V}} -> {ok, V}; - Other -> Other - end. +not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]). -get_config(Key, DefaultValue) -> - case get_config(Key) of - {ok, V} -> V; - {error, not_found} -> DefaultValue - end. +assert_args_equivalence(Orig, New, Name, Keys) -> + [assert_args_equivalence1(Orig, New, Name, Key) || Key <- Keys], + ok. -set_config(Key, Value) -> - ok = mnesia:dirty_write({rabbit_config, Key, Value}). +assert_args_equivalence1(Orig, New, Name, Key) -> + case {table_lookup(Orig, Key), table_lookup(New, Key)} of + {Same, Same} -> ok; + {Orig1, New1} -> protocol_error( + precondition_failed, + "inequivalent arg '~s' for ~s: " + "received ~s but current is ~s", + [Key, rs(Name), val(New1), val(Orig1)]) + end. + +val(undefined) -> + "none"; +val({Type, Value}) -> + Fmt = case is_binary(Value) of + true -> "the value '~s' of type '~s'"; + false -> "the value '~w' of type '~s'" + end, + lists:flatten(io_lib:format(Fmt, [Value, Type])). dirty_read(ReadSpec) -> case mnesia:dirty_read(ReadSpec) of @@ -186,6 +260,12 @@ [] -> {error, not_found} end. +table_lookup(Table, Key) -> + case lists:keysearch(Key, 1, Table) of + {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin}; + false -> undefined + end. + r(#resource{virtual_host = VHostPath}, Kind, Name) when is_binary(Name) -> #resource{virtual_host = VHostPath, kind = Kind, name = Name}; @@ -198,40 +278,45 @@ r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) -> r_arg(VHostPath, Kind, Table, Key); r_arg(VHostPath, Kind, Table, Key) -> - case lists:keysearch(Key, 1, Table) of - {value, {_, longstr, NameBin}} -> r(VHostPath, Kind, NameBin); - false -> undefined + case table_lookup(Table, Key) of + {longstr, NameBin} -> r(VHostPath, Kind, NameBin); + undefined -> undefined end. rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) -> lists:flatten(io_lib:format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath])). -enable_cover() -> - enable_cover("."). +enable_cover() -> enable_cover(["."]). + +enable_cover(Dirs) -> + lists:foldl(fun (Dir, ok) -> + case cover:compile_beam_directory( + filename:join(lists:concat([Dir]),"ebin")) of + {error, _} = Err -> Err; + _ -> ok + end; + (_Dir, Err) -> + Err + end, ok, Dirs). + +start_cover(NodesS) -> + {ok, _} = cover:start([makenode(N) || N <- NodesS]), + ok. + +report_cover() -> report_cover(["."]). + +report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok. -enable_cover([Root]) when is_atom(Root) -> - enable_cover(atom_to_list(Root)); -enable_cover(Root) -> - case cover:compile_beam_directory(filename:join(Root, "ebin")) of - {error,Reason} -> {error,Reason}; - _ -> ok - end. - -report_cover() -> - report_cover("."). 
- -report_cover([Root]) when is_atom(Root) -> - report_cover(atom_to_list(Root)); -report_cover(Root) -> +report_cover1(Root) -> Dir = filename:join(Root, "cover"), - ok = filelib:ensure_dir(filename:join(Dir,"junk")), - lists:foreach(fun(F) -> file:delete(F) end, + ok = filelib:ensure_dir(filename:join(Dir, "junk")), + lists:foreach(fun (F) -> file:delete(F) end, filelib:wildcard(filename:join(Dir, "*.html"))), {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]), {CT, NCT} = lists:foldl( - fun(M,{CovTot, NotCovTot}) -> + fun (M,{CovTot, NotCovTot}) -> {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module), ok = report_coverage_percentage(SummaryFile, Cov, NotCov, M), @@ -265,8 +350,8 @@ with_exit_handler(Handler, Thunk) -> try Thunk() - catch - exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown -> + catch exit:{R, _} when R =:= noproc; R =:= nodedown; + R =:= normal; R =:= shutdown -> Handler() end. @@ -287,29 +372,47 @@ end end. -with_vhost(VHostPath, Thunk) -> - fun () -> - case mnesia:read({rabbit_vhost, VHostPath}) of - [] -> - mnesia:abort({no_such_vhost, VHostPath}); - [_V] -> - Thunk() - end - end. - with_user_and_vhost(Username, VHostPath, Thunk) -> - with_user(Username, with_vhost(VHostPath, Thunk)). - + with_user(Username, rabbit_vhost:with(VHostPath, Thunk)). execute_mnesia_transaction(TxFun) -> %% Making this a sync_transaction allows us to use dirty_read %% elsewhere and get a consistent result even when that read %% executes on a different node. - case mnesia:sync_transaction(TxFun) of + case worker_pool:submit({mnesia, sync_transaction, [TxFun]}) of {atomic, Result} -> Result; {aborted, Reason} -> throw({error, Reason}) end. + +%% Like execute_mnesia_transaction/1 with additional Pre- and Post- +%% commit function +execute_mnesia_transaction(TxFun, PrePostCommitFun) -> + case mnesia:is_transaction() of + true -> throw(unexpected_transaction); + false -> ok + end, + PrePostCommitFun(execute_mnesia_transaction( + fun () -> + Result = TxFun(), + PrePostCommitFun(Result, true), + Result + end), false). + +%% Like execute_mnesia_transaction/2, but TxFun is expected to return a +%% TailFun which gets called immediately before and after the tx commit +execute_mnesia_tx_with_tail(TxFun) -> + case mnesia:is_transaction() of + true -> execute_mnesia_transaction(TxFun); + false -> TailFun = execute_mnesia_transaction( + fun () -> + TailFun1 = TxFun(), + TailFun1(true), + TailFun1 + end), + TailFun(false) + end. + ensure_ok(ok, _) -> ok; ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}}). @@ -337,10 +440,6 @@ io_lib:format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port]))). -intersperse(_, []) -> []; -intersperse(_, [E]) -> [E]; -intersperse(Sep, [E|T]) -> [E, Sep | intersperse(Sep, T)]. - %% This is a modified version of Luke Gorrie's pmap - %% http://lukego.livejournal.com/6753.html - that doesn't care about %% the order in which results are received. @@ -351,26 +450,26 @@ Parent = self(), Ref = make_ref(), [receive {Ref, Result} -> Result end - || _ <- [spawn(fun() -> Parent ! {Ref, F(X)} end) || X <- L]]. + || _ <- [spawn(fun () -> Parent ! {Ref, F(X)} end) || X <- L]]. map_in_order(F, L) -> lists:reverse( lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)). -%% For each entry in a table, execute a function in a transaction. -%% This is often far more efficient than wrapping a tx around the lot. +%% Fold over each entry in a table, executing the cons function in a +%% transaction. 
This is often far more efficient than wrapping a tx +%% around the lot. %% %% We ignore entries that have been modified or removed. -table_foreach(F, TableName) -> - lists:foreach( - fun (E) -> execute_mnesia_transaction( +table_fold(F, Acc0, TableName) -> + lists:foldl( + fun (E, Acc) -> execute_mnesia_transaction( fun () -> case mnesia:match_object(TableName, E, read) of - [] -> ok; - _ -> F(E) + [] -> Acc; + _ -> F(E, Acc) end end) - end, dirty_read_all(TableName)), - ok. + end, Acc0, dirty_read_all(TableName)). dirty_read_all(TableName) -> mnesia:dirty_select(TableName, [{'$1',[],['$1']}]). @@ -504,63 +603,41 @@ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1) end. +%% Sorts a list of AMQP table fields as per the AMQP spec +sort_field_table(Arguments) -> + lists:keysort(1, Arguments). + %% This provides a string representation of a pid that is the same %% regardless of what node we are running on. The representation also %% permits easy identification of the pid's node. pid_to_string(Pid) when is_pid(Pid) -> %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and %% 8.7) - <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,_Cre:8>> + <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>> = term_to_binary(Pid), Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>), - lists:flatten(io_lib:format("<~w.~B.~B>", [Node, Id, Ser])). + lists:flatten(io_lib:format("<~w.~B.~B.~B>", [Node, Cre, Id, Ser])). %% inverse of above string_to_pid(Str) -> - ErrorFun = fun () -> throw({error, {invalid_pid_syntax, Str}}) end, - %% TODO: simplify this code by using the 're' module, once we drop - %% support for R11 - %% - %% 1) sanity check + Err = {error, {invalid_pid_syntax, Str}}, %% The \ before the trailing $ is only there to keep emacs %% font-lock from getting confused. 
- case regexp:first_match(Str, "^<.*\\.[0-9]+\\.[0-9]+>\$") of - {match, _, _} -> - %% 2) strip <> - Str1 = string:substr(Str, 2, string:len(Str) - 2), - %% 3) extract three constituent parts, taking care to - %% handle dots in the node part (hence the reverse and concat) - [SerStr, IdStr | Rest] = lists:reverse(string:tokens(Str1, ".")), - NodeStr = lists:concat(lists:reverse(Rest)), - %% 4) construct a triple term from the three parts - TripleStr = lists:flatten(io_lib:format("{~s,~s,~s}.", - [NodeStr, IdStr, SerStr])), - %% 5) parse the triple - Tokens = case erl_scan:string(TripleStr) of - {ok, Tokens1, _} -> Tokens1; - {error, _, _} -> ErrorFun() - end, - Term = case erl_parse:parse_term(Tokens) of - {ok, Term1} -> Term1; - {error, _} -> ErrorFun() - end, - {Node, Id, Ser} = - case Term of - {Node1, Id1, Ser1} when is_atom(Node1) andalso - is_integer(Id1) andalso - is_integer(Ser1) -> - Term; - _ -> - ErrorFun() - end, - %% 6) turn the triple into a pid - see pid_to_string - <<131,NodeEnc/binary>> = term_to_binary(Node), - binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,0:8>>); + case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$", + [{capture,all_but_first,list}]) of + {match, [NodeStr, CreStr, IdStr, SerStr]} -> + %% the NodeStr atom might be quoted, so we have to parse + %% it rather than doing a simple list_to_atom + NodeAtom = case erl_scan:string(NodeStr) of + {ok, [{atom, _, X}], _} -> X; + {error, _, _} -> throw(Err) + end, + <<131,NodeEnc/binary>> = term_to_binary(NodeAtom), + [Cre, Id, Ser] = lists:map(fun list_to_integer/1, + [CreStr, IdStr, SerStr]), + binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>); nomatch -> - ErrorFun(); - Error -> - %% invalid regexp - shouldn't happen - throw(Error) + throw(Err) end. version_compare(A, B, lte) -> @@ -578,20 +655,206 @@ version_compare(A, B, Result) -> Result =:= version_compare(A, B). -version_compare([], []) -> +version_compare(A, A) -> eq; -version_compare([], _ ) -> +version_compare([], [$0 | B]) -> + version_compare([], dropdot(B)); +version_compare([], _) -> lt; %% 2.3 < 2.3.1 -version_compare(_ , []) -> +version_compare([$0 | A], []) -> + version_compare(dropdot(A), []); +version_compare(_, []) -> gt; %% 2.3.1 > 2.3 version_compare(A, B) -> {AStr, ATl} = lists:splitwith(fun (X) -> X =/= $. end, A), {BStr, BTl} = lists:splitwith(fun (X) -> X =/= $. end, B), ANum = list_to_integer(AStr), BNum = list_to_integer(BStr), - if ANum =:= BNum -> ATl1 = lists:dropwhile(fun (X) -> X =:= $. end, ATl), - BTl1 = lists:dropwhile(fun (X) -> X =:= $. end, BTl), - version_compare(ATl1, BTl1); + if ANum =:= BNum -> version_compare(dropdot(ATl), dropdot(BTl)); ANum < BNum -> lt; ANum > BNum -> gt end. + +dropdot(A) -> lists:dropwhile(fun (X) -> X =:= $. end, A). + +recursive_delete(Files) -> + lists:foldl(fun (Path, ok ) -> recursive_delete1(Path); + (_Path, {error, _Err} = Error) -> Error + end, ok, Files). + +recursive_delete1(Path) -> + case filelib:is_dir(Path) of + false -> case file:delete(Path) of + ok -> ok; + {error, enoent} -> ok; %% Path doesn't exist anyway + {error, Err} -> {error, {Path, Err}} + end; + true -> case file:list_dir(Path) of + {ok, FileNames} -> + case lists:foldl( + fun (FileName, ok) -> + recursive_delete1( + filename:join(Path, FileName)); + (_FileName, Error) -> + Error + end, ok, FileNames) of + ok -> + case file:del_dir(Path) of + ok -> ok; + {error, Err} -> {error, {Path, Err}} + end; + {error, _Err} = Error -> + Error + end; + {error, Err} -> + {error, {Path, Err}} + end + end. 
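Judging from the clauses above (examples are illustrative, not part of the diff), the reworked comparison now treats a trailing zero component as insignificant:

    lt   = rabbit_misc:version_compare("2.3",   "2.3.1"),
    gt   = rabbit_misc:version_compare("2.3.1", "2.3"),
    eq   = rabbit_misc:version_compare("2.3.0", "2.3"),   %% new: trailing ".0" ignored
    true = rabbit_misc:version_compare("2.3",   "2.3.1", lte).

The lte example assumes the standard less-than-or-equal clause, whose body sits outside this hunk.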
+ +recursive_copy(Src, Dest) -> + case filelib:is_dir(Src) of + false -> case file:copy(Src, Dest) of + {ok, _Bytes} -> ok; + {error, enoent} -> ok; %% Path doesn't exist anyway + {error, Err} -> {error, {Src, Dest, Err}} + end; + true -> case file:list_dir(Src) of + {ok, FileNames} -> + case file:make_dir(Dest) of + ok -> + lists:foldl( + fun (FileName, ok) -> + recursive_copy( + filename:join(Src, FileName), + filename:join(Dest, FileName)); + (_FileName, Error) -> + Error + end, ok, FileNames); + {error, Err} -> + {error, {Src, Dest, Err}} + end; + {error, Err} -> + {error, {Src, Dest, Err}} + end + end. + +dict_cons(Key, Value, Dict) -> + dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict). + +orddict_cons(Key, Value, Dict) -> + orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict). + +unlink_and_capture_exit(Pid) -> + unlink(Pid), + receive {'EXIT', Pid, _} -> ok + after 0 -> ok + end. + +% Separate flags and options from arguments. +% get_options([{flag, "-q"}, {option, "-p", "/"}], +% ["set_permissions","-p","/","guest", +% "-q",".*",".*",".*"]) +% == {["set_permissions","guest",".*",".*",".*"], +% [{"-q",true},{"-p","/"}]} +get_options(Defs, As) -> + lists:foldl(fun(Def, {AsIn, RsIn}) -> + {AsOut, Value} = case Def of + {flag, Key} -> + get_flag(Key, AsIn); + {option, Key, Default} -> + get_option(Key, Default, AsIn) + end, + {AsOut, [{Key, Value} | RsIn]} + end, {As, []}, Defs). + +get_option(K, _Default, [K, V | As]) -> + {As, V}; +get_option(K, Default, [Nk | As]) -> + {As1, V} = get_option(K, Default, As), + {[Nk | As1], V}; +get_option(_, Default, As) -> + {As, Default}. + +get_flag(K, [K | As]) -> + {As, true}; +get_flag(K, [Nk | As]) -> + {As1, V} = get_flag(K, As), + {[Nk | As1], V}; +get_flag(_, []) -> + {[], false}. + +now_ms() -> + timer:now_diff(now(), {0,0,0}) div 1000. + +module_attributes(Module) -> + case catch Module:module_info(attributes) of + {'EXIT', {undef, [{Module, module_info, _} | _]}} -> + io:format("WARNING: module ~p not found, so not scanned for boot steps.~n", + [Module]), + []; + {'EXIT', Reason} -> + exit(Reason); + V -> + V + end. + +all_module_attributes(Name) -> + Modules = + lists:usort( + lists:append( + [Modules || {App, _, _} <- application:loaded_applications(), + {ok, Modules} <- [application:get_key(App, modules)]])), + lists:foldl( + fun (Module, Acc) -> + case lists:append([Atts || {N, Atts} <- module_attributes(Module), + N =:= Name]) of + [] -> Acc; + Atts -> [{Module, Atts} | Acc] + end + end, [], Modules). + + +build_acyclic_graph(VertexFun, EdgeFun, Graph) -> + G = digraph:new([acyclic]), + try + [case digraph:vertex(G, Vertex) of + false -> digraph:add_vertex(G, Vertex, Label); + _ -> ok = throw({graph_error, {vertex, duplicate, Vertex}}) + end || {Module, Atts} <- Graph, + {Vertex, Label} <- VertexFun(Module, Atts)], + [case digraph:add_edge(G, From, To) of + {error, E} -> throw({graph_error, {edge, E, From, To}}); + _ -> ok + end || {Module, Atts} <- Graph, + {From, To} <- EdgeFun(Module, Atts)], + {ok, G} + catch {graph_error, Reason} -> + true = digraph:delete(G), + {error, Reason} + end. + +%% TODO: When we stop supporting Erlang prior to R14, this should be +%% replaced with file:open [write, exclusive] +lock_file(Path) -> + case filelib:is_file(Path) of + true -> {error, eexist}; + false -> {ok, Lock} = file:open(Path, [write]), + ok = file:close(Lock) + end. + +const_ok(_) -> ok. +const(X) -> fun (_) -> X end. 
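A small usage sketch for the cons helpers above (not part of the patch; module name assumed as before): dict_cons/3 and orddict_cons/3 prepend a value to the list stored under a key, creating the entry when the key is absent.

    D = rabbit_misc:dict_cons(k, 2, rabbit_misc:dict_cons(k, 1, dict:new())),
    [2, 1] = dict:fetch(k, D).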
+ +%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see +%% when IPv6 is enabled but not used (i.e. 99% of the time). +ntoa({0,0,0,0,0,16#ffff,AB,CD}) -> + inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256}); +ntoa(IP) -> + inet_parse:ntoa(IP). + +ntoab(IP) -> + Str = ntoa(IP), + case string:str(Str, ":") of + 0 -> Str; + _ -> "[" ++ Str ++ "]" + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_mnesia.erl rabbitmq-server-2.3.1/src/rabbit_mnesia.erl --- rabbitmq-server-1.7.2/src/rabbit_mnesia.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_mnesia.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,39 +1,26 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% + -module(rabbit_mnesia). -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, - cluster/1, reset/0, force_reset/0, is_clustered/0, - empty_ram_only_tables/0]). + cluster/1, force_cluster/1, reset/0, force_reset/0, + is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, + empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). @@ -47,48 +34,74 @@ -ifdef(use_specs). --spec(status/0 :: () -> [{'nodes' | 'running_nodes', [erlang_node()]}]). --spec(dir/0 :: () -> string()). +-export_type([node_type/0]). + +-type(node_type() :: disc_only | disc | ram | unknown). +-spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} | + {'running_nodes', [node()]}]). +-spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). -spec(is_db_empty/0 :: () -> boolean()). --spec(cluster/1 :: ([erlang_node()]) -> 'ok'). 
+-spec(cluster/1 :: ([node()]) -> 'ok'). +-spec(force_cluster/1 :: ([node()]) -> 'ok'). +-spec(cluster/2 :: ([node()], boolean()) -> 'ok'). -spec(reset/0 :: () -> 'ok'). -spec(force_reset/0 :: () -> 'ok'). -spec(is_clustered/0 :: () -> boolean()). +-spec(running_clustered_nodes/0 :: () -> [node()]). +-spec(all_clustered_nodes/0 :: () -> [node()]). -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). +-spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). -endif. %%---------------------------------------------------------------------------- status() -> - [{nodes, mnesia:system_info(db_nodes)}, - {running_nodes, mnesia:system_info(running_db_nodes)}]. + [{nodes, case mnesia:system_info(is_running) of + yes -> [{Key, Nodes} || + {Key, CopyType} <- [{disc_only, disc_only_copies}, + {disc, disc_copies}, + {ram, ram_copies}], + begin + Nodes = nodes_of_type(CopyType), + Nodes =/= [] + end]; + no -> case all_clustered_nodes() of + [] -> []; + Nodes -> [{unknown, Nodes}] + end + end}, + {running_nodes, running_clustered_nodes()}]. init() -> ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), - ok = init_db(read_cluster_nodes_config()), - ok = wait_for_tables(), + ok = init_db(read_cluster_nodes_config(), true), ok. is_db_empty() -> lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end, table_names()). +cluster(ClusterNodes) -> + cluster(ClusterNodes, false). +force_cluster(ClusterNodes) -> + cluster(ClusterNodes, true). + %% Alter which disk nodes this node is clustered with. This can be a %% subset of all the disk nodes in the cluster but can (and should) %% include the node itself if it is to be a disk rather than a ram -%% node. -cluster(ClusterNodes) -> +%% node. If Force is false, only connections to online nodes are +%% allowed. +cluster(ClusterNodes, Force) -> ok = ensure_mnesia_not_running(), ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes), - ok = wait_for_tables(), + ok = init_db(ClusterNodes, Force), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() @@ -102,9 +115,15 @@ force_reset() -> reset(true). is_clustered() -> - RunningNodes = mnesia:system_info(running_db_nodes), + RunningNodes = running_clustered_nodes(), [node()] /= RunningNodes andalso [] /= RunningNodes. +all_clustered_nodes() -> + mnesia:system_info(db_nodes). + +running_clustered_nodes() -> + mnesia:system_info(running_db_nodes). + empty_ram_only_tables() -> Node = node(), lists:foreach( @@ -118,58 +137,98 @@ %%-------------------------------------------------------------------- +nodes_of_type(Type) -> + %% This function should return the nodes of a certain type (ram, + %% disc or disc_only) in the current cluster. The type of nodes + %% is determined when the cluster is initially configured. + %% Specifically, we check whether a certain table, which we know + %% will be written to disk on a disc node, is stored on disk or in + %% RAM. + mnesia:table_info(rabbit_durable_exchange, Type). 
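By way of example (node names are hypothetical; not part of the patch), the reshaped status/0 above now groups cluster members by storage type, so a running two-node cluster with one disc and one ram node would report something like:

    [{nodes, [{disc, [rabbit@bunny]}, {ram, [rabbit@hare]}]},
     {running_nodes, [rabbit@hare, rabbit@bunny]}]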
+ table_definitions() -> [{rabbit_user, - [{record_name, user}, - {attributes, record_info(fields, user)}, - {disc_copies, [node()]}]}, + [{record_name, internal_user}, + {attributes, record_info(fields, internal_user)}, + {disc_copies, [node()]}, + {match, #internal_user{_='_'}}]}, {rabbit_user_permission, [{record_name, user_permission}, {attributes, record_info(fields, user_permission)}, - {disc_copies, [node()]}]}, + {disc_copies, [node()]}, + {match, #user_permission{user_vhost = #user_vhost{_='_'}, + permission = #permission{_='_'}, + _='_'}}]}, {rabbit_vhost, [{record_name, vhost}, {attributes, record_info(fields, vhost)}, - {disc_copies, [node()]}]}, - {rabbit_config, - [{disc_copies, [node()]}]}, + {disc_copies, [node()]}, + {match, #vhost{_='_'}}]}, {rabbit_listener, [{record_name, listener}, {attributes, record_info(fields, listener)}, - {type, bag}]}, + {type, bag}, + {match, #listener{_='_'}}]}, {rabbit_durable_route, [{record_name, route}, {attributes, record_info(fields, route)}, - {disc_copies, [node()]}]}, + {disc_copies, [node()]}, + {match, #route{binding = binding_match(), _='_'}}]}, {rabbit_route, [{record_name, route}, {attributes, record_info(fields, route)}, - {type, ordered_set}]}, + {type, ordered_set}, + {match, #route{binding = binding_match(), _='_'}}]}, {rabbit_reverse_route, [{record_name, reverse_route}, {attributes, record_info(fields, reverse_route)}, - {type, ordered_set}]}, + {type, ordered_set}, + {match, #reverse_route{reverse_binding = reverse_binding_match(), + _='_'}}]}, + %% Consider the implications to nodes_of_type/1 before altering + %% the next entry. {rabbit_durable_exchange, [{record_name, exchange}, {attributes, record_info(fields, exchange)}, - {disc_copies, [node()]}]}, + {disc_copies, [node()]}, + {match, #exchange{name = exchange_name_match(), _='_'}}]}, {rabbit_exchange, [{record_name, exchange}, - {attributes, record_info(fields, exchange)}]}, + {attributes, record_info(fields, exchange)}, + {match, #exchange{name = exchange_name_match(), _='_'}}]}, {rabbit_durable_queue, [{record_name, amqqueue}, {attributes, record_info(fields, amqqueue)}, - {disc_copies, [node()]}]}, + {disc_copies, [node()]}, + {match, #amqqueue{name = queue_name_match(), _='_'}}]}, {rabbit_queue, [{record_name, amqqueue}, - {attributes, record_info(fields, amqqueue)}]}]. + {attributes, record_info(fields, amqqueue)}, + {match, #amqqueue{name = queue_name_match(), _='_'}}]}]. + +binding_match() -> + #binding{source = exchange_name_match(), + destination = binding_destination_match(), + _='_'}. +reverse_binding_match() -> + #reverse_binding{destination = binding_destination_match(), + source = exchange_name_match(), + _='_'}. +binding_destination_match() -> + resource_match('_'). +exchange_name_match() -> + resource_match(exchange). +queue_name_match() -> + resource_match(queue). +resource_match(Kind) -> + #resource{kind = Kind, _='_'}. table_names() -> [Tab || {Tab, _} <- table_definitions()]. replicated_table_names() -> - [Tab || {Tab, Attrs} <- table_definitions(), - not lists:member({local_content, true}, Attrs) + [Tab || {Tab, TabDef} <- table_definitions(), + not lists:member({local_content, true}, TabDef) ]. dir() -> mnesia:system_info(directory). @@ -179,26 +238,69 @@ case filelib:ensure_dir(MnesiaDir) of {error, Reason} -> throw({error, {cannot_create_mnesia_dir, MnesiaDir, Reason}}); - ok -> ok + ok -> + ok end. 
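The new {match, ...} entries above are not handed to mnesia:create_table/2 (create_tables/0 further down strips them); they describe the shape that stored records are expected to have, and the integrity check below applies them as match specifications. A hypothetical fragment, assuming the record definitions from rabbit.hrl are in scope and Q is a record read from the rabbit_queue table:

    Match = #amqqueue{name = #resource{kind = queue, _ = '_'}, _ = '_'},
    MS    = ets:match_spec_compile([{Match, [], ['$_']}]),
    case ets:match_spec_run([Q], MS) of
        [Q] -> ok;                              %% record has the expected shape
        _   -> {error, invalid_table_content}
    end.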
ensure_mnesia_running() -> case mnesia:system_info(is_running) of yes -> ok; - no -> throw({error, mnesia_not_running}) + no -> throw({error, mnesia_not_running}) end. ensure_mnesia_not_running() -> case mnesia:system_info(is_running) of - no -> ok; + no -> ok; yes -> throw({error, mnesia_unexpectedly_running}) end. +ensure_schema_integrity() -> + case check_schema_integrity() of + ok -> + ok; + {error, Reason} -> + throw({error, {schema_integrity_check_failed, Reason}}) + end. + check_schema_integrity() -> - %%TODO: more thorough checks - case catch [mnesia:table_info(Tab, version) || Tab <- table_names()] of - {'EXIT', Reason} -> {error, Reason}; - _ -> ok + Tables = mnesia:system_info(tables), + case [Error || {Tab, TabDef} <- table_definitions(), + case lists:member(Tab, Tables) of + false -> + Error = {table_missing, Tab}, + true; + true -> + {_, ExpAttrs} = proplists:lookup(attributes, TabDef), + Attrs = mnesia:table_info(Tab, attributes), + Error = {table_attributes_mismatch, Tab, + ExpAttrs, Attrs}, + Attrs /= ExpAttrs + end] of + [] -> check_table_integrity(); + Errors -> {error, Errors} + end. + +check_table_integrity() -> + ok = wait_for_tables(), + case lists:all(fun ({Tab, TabDef}) -> + {_, Match} = proplists:lookup(match, TabDef), + read_test_table(Tab, Match) + end, table_definitions()) of + true -> ok; + false -> {error, invalid_table_content} + end. + +read_test_table(Tab, Match) -> + case mnesia:dirty_first(Tab) of + '$end_of_table' -> + true; + Key -> + ObjList = mnesia:dirty_read(Tab, Key), + MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]), + case ets:match_spec_run(ObjList, MatchComp) of + ObjList -> true; + _ -> false + end end. %% The cluster node config file contains some or all of the disk nodes @@ -227,21 +329,8 @@ case rabbit_misc:read_term_file(FileName) of {ok, [ClusterNodes]} -> ClusterNodes; {error, enoent} -> - case application:get_env(cluster_config) of - undefined -> []; - {ok, DefaultFileName} -> - case file:consult(DefaultFileName) of - {ok, [ClusterNodes]} -> ClusterNodes; - {error, enoent} -> - error_logger:warning_msg( - "default cluster config file ~p does not exist~n", - [DefaultFileName]), - []; - {error, Reason} -> - throw({error, {cannot_read_cluster_nodes_config, - DefaultFileName, Reason}}) - end - end; + {ok, ClusterNodes} = application:get_env(rabbit, cluster_nodes), + ClusterNodes; {error, Reason} -> throw({error, {cannot_read_cluster_nodes_config, FileName, Reason}}) @@ -259,44 +348,91 @@ %% Take a cluster node config and create the right kind of node - a %% standalone disk node, or disk or ram node connected to the -%% specified cluster nodes. -init_db(ClusterNodes) -> - case mnesia:change_config(extra_db_nodes, ClusterNodes -- [node()]) of - {ok, []} -> - case mnesia:system_info(use_dir) of - true -> - case check_schema_integrity() of - ok -> - ok; - {error, Reason} -> - %% NB: we cannot use rabbit_log here since - %% it may not have been started yet - error_logger:warning_msg( - "schema integrity check failed: ~p~n" - "moving database to backup location " - "and recreating schema from scratch~n", - [Reason]), - ok = move_db(), - ok = create_schema() +%% specified cluster nodes. If Force is false, don't allow +%% connections to offline nodes. 
+init_db(ClusterNodes, Force) -> + UClusterNodes = lists:usort(ClusterNodes), + ProperClusterNodes = UClusterNodes -- [node()], + case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of + {ok, Nodes} -> + case Force of + false -> FailedClusterNodes = ProperClusterNodes -- Nodes, + case FailedClusterNodes of + [] -> ok; + _ -> throw({error, {failed_to_cluster_with, + FailedClusterNodes, + "Mnesia could not connect " + "to some nodes."}}) + end; + true -> ok + end, + case {Nodes, mnesia:system_info(use_dir), all_clustered_nodes()} of + {[], true, [_]} -> + %% True single disc node, attempt upgrade + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade() of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() end; - false -> - ok = create_schema() + {[], true, _} -> + %% "Master" (i.e. without config) disc node in cluster, + %% verify schema + ok = wait_for_tables(), + ensure_version_ok(rabbit_upgrade:read_version()), + ensure_schema_ok(); + {[], false, _} -> + %% Nothing there at all, start from scratch + ok = create_schema(); + {[AnotherNode|_], _, _} -> + %% Subsequent node in cluster, catch up + ensure_version_ok(rabbit_upgrade:read_version()), + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + ensure_schema_ok() end; - {ok, [_|_]} -> - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end); {error, Reason} -> %% one reason we may end up here is if we try to join %% nodes together that are currently running standalone or %% are members of a different cluster - throw({error, {unable_to_join_cluster, - ClusterNodes, Reason}}) + throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) + end. + +schema_ok_or_move() -> + case check_schema_integrity() of + ok -> + ok; + {error, Reason} -> + %% NB: we cannot use rabbit_log here since it may not have been + %% started yet + error_logger:warning_msg("schema integrity check failed: ~p~n" + "moving database to backup location " + "and recreating schema from scratch~n", + [Reason]), + ok = move_db(), + ok = create_schema() + end. + +ensure_version_ok({ok, DiscVersion}) -> + case rabbit_upgrade:desired_version() of + DiscVersion -> ok; + DesiredVersion -> throw({error, {schema_mismatch, + DesiredVersion, DiscVersion}}) + end; +ensure_version_ok({error, _}) -> + ok = rabbit_upgrade:write_version(). + +ensure_schema_ok() -> + case check_schema_integrity() of + ok -> ok; + {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. create_schema() -> @@ -305,7 +441,10 @@ cannot_create_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - create_tables(). + ok = create_tables(), + ok = ensure_schema_integrity(), + ok = wait_for_tables(), + ok = rabbit_upgrade:write_version(). move_db() -> mnesia:stop(), @@ -329,13 +468,24 @@ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok. 
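In practical terms (hypothetical node name; not part of the patch), the Force flag threaded through init_db/2 is what separates the two public entry points added above: cluster/1 insists on contacting every listed node, while force_cluster/1 records the requested configuration even if some of those nodes are currently down.

    %% every listed node must be reachable, otherwise
    %% {error, {failed_to_cluster_with, ...}} is thrown
    ok = rabbit_mnesia:cluster([node(), rabbit@bunny]),
    %% tolerates rabbit@bunny being offline at the time of the call
    ok = rabbit_mnesia:force_cluster([node(), rabbit@bunny]).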
+copy_db(Destination) -> + mnesia:stop(), + case rabbit_misc:recursive_copy(dir(), Destination) of + ok -> + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + ok = wait_for_tables(); + {error, E} -> + {error, E} + end. + create_tables() -> - lists:foreach(fun ({Tab, TabArgs}) -> - case mnesia:create_table(Tab, TabArgs) of + lists:foreach(fun ({Tab, TabDef}) -> + TabDef1 = proplists:delete(match, TabDef), + case mnesia:create_table(Tab, TabDef1) of {atomic, ok} -> ok; {aborted, Reason} -> throw({error, {table_creation_failed, - Tab, TabArgs, Reason}}) + Tab, TabDef1, Reason}}) end end, table_definitions()), @@ -346,7 +496,7 @@ create_local_table_copies(Type) -> lists:foreach( - fun({Tab, TabDef}) -> + fun ({Tab, TabDef}) -> HasDiscCopies = table_has_copy_type(TabDef, disc_copies), HasDiscOnlyCopies = table_has_copy_type(TabDef, disc_only_copies), LocalTab = proplists:get_bool(local_content, TabDef), @@ -390,17 +540,12 @@ wait_for_tables() -> wait_for_tables(table_names()). wait_for_tables(TableNames) -> - case check_schema_integrity() of - ok -> - case mnesia:wait_for_tables(TableNames, 30000) of - ok -> ok; - {timeout, BadTabs} -> - throw({error, {timeout_waiting_for_tables, BadTabs}}); - {error, Reason} -> - throw({error, {failed_waiting_for_tables, Reason}}) - end; + case mnesia:wait_for_tables(TableNames, 30000) of + ok -> ok; + {timeout, BadTabs} -> + throw({error, {timeout_waiting_for_tables, BadTabs}}); {error, Reason} -> - throw({error, {schema_integrity_check_failed, Reason}}) + throw({error, {failed_waiting_for_tables, Reason}}) end. reset(Force) -> @@ -414,8 +559,8 @@ {Nodes, RunningNodes} = try ok = init(), - {mnesia:system_info(db_nodes) -- [Node], - mnesia:system_info(running_db_nodes) -- [Node]} + {all_clustered_nodes() -- [Node], + running_clustered_nodes() -- [Node]} after mnesia:stop() end, @@ -424,9 +569,8 @@ cannot_delete_schema) end, ok = delete_cluster_nodes_config(), - %% remove persistet messages and any other garbage we find - lists:foreach(fun file:delete/1, - filelib:wildcard(dir() ++ "/*")), + %% remove persisted messages and any other garbage we find + ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")), ok. leave_cluster([], _) -> ok; diff -Nru rabbitmq-server-1.7.2/src/rabbit_msg_file.erl rabbitmq-server-2.3.1/src/rabbit_msg_file.erl --- rabbitmq-server-1.7.2/src/rabbit_msg_file.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_msg_file.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,121 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_msg_file). + +-export([append/3, read/2, scan/2]). + +%%---------------------------------------------------------------------------- + +-include("rabbit_msg_store.hrl"). + +-define(INTEGER_SIZE_BYTES, 8). +-define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)). +-define(WRITE_OK_SIZE_BITS, 8). +-define(WRITE_OK_MARKER, 255). 
+-define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
+-define(GUID_SIZE_BYTES, 16).
+-define(GUID_SIZE_BITS, (8 * ?GUID_SIZE_BYTES)).
+-define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(io_device() :: any()).
+-type(position() :: non_neg_integer()).
+-type(msg_size() :: non_neg_integer()).
+-type(file_size() :: non_neg_integer()).
+
+-spec(append/3 :: (io_device(), rabbit_guid:guid(), msg()) ->
+                       rabbit_types:ok_or_error2(msg_size(), any())).
+-spec(read/2 :: (io_device(), msg_size()) ->
+                     rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()},
+                                               any())).
+-spec(scan/2 :: (io_device(), file_size()) ->
+                     {'ok', [{rabbit_guid:guid(), msg_size(), position()}],
+                      position()}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+append(FileHdl, Guid, MsgBody)
+  when is_binary(Guid) andalso size(Guid) =:= ?GUID_SIZE_BYTES ->
+    MsgBodyBin  = term_to_binary(MsgBody),
+    MsgBodyBinSize = size(MsgBodyBin),
+    Size = MsgBodyBinSize + ?GUID_SIZE_BYTES,
+    case file_handle_cache:append(FileHdl,
+                                  <<Size:?INTEGER_SIZE_BITS,
+                                    Guid:?GUID_SIZE_BYTES/binary,
+                                    MsgBodyBin:MsgBodyBinSize/binary,
+                                    ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
+        ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
+        KO -> KO
+    end.
+
+read(FileHdl, TotalSize) ->
+    Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
+    BodyBinSize = Size - ?GUID_SIZE_BYTES,
+    case file_handle_cache:read(FileHdl, TotalSize) of
+        {ok, <<Size:?INTEGER_SIZE_BITS,
+               Guid:?GUID_SIZE_BYTES/binary,
+               MsgBodyBin:BodyBinSize/binary,
+               ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
+            {ok, {Guid, binary_to_term(MsgBodyBin)}};
+        KO -> KO
+    end.
+
+scan(FileHdl, FileSize) when FileSize >= 0 ->
+    scan(FileHdl, FileSize, <<>>, 0, [], 0).
+
+scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset) ->
+    {ok, Acc, ScanOffset};
+scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset) ->
+    Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]),
+    case file_handle_cache:read(FileHdl, Read) of
+        {ok, Data1} ->
+            {Data2, Acc1, ScanOffset1} =
+                scan(<<Data/binary, Data1/binary>>, Acc, ScanOffset),
+            ReadOffset1 = ReadOffset + size(Data1),
+            scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1);
+        _KO ->
+            {ok, Acc, ScanOffset}
+    end.
+
+scan(<<>>, Acc, Offset) ->
+    {<<>>, Acc, Offset};
+scan(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset) ->
+    {<<>>, Acc, Offset}; %% Nothing to do other than stop.
+scan(<<Size:?INTEGER_SIZE_BITS, GuidAndMsg:Size/binary,
+       WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Acc, Offset) ->
+    TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
+    case WriteMarker of
+        ?WRITE_OK_MARKER ->
+            %% Here we take option 5 from
+            %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
+            %% which we read the Guid as a number, and then convert it
+            %% back to a binary in order to work around bugs in
+            %% Erlang's GC.
+            <<GuidNum:?GUID_SIZE_BITS, _MsgBody/binary>> =
+                <<GuidAndMsg:Size/binary>>,
+            <<Guid:?GUID_SIZE_BYTES/binary>> = <<GuidNum:?GUID_SIZE_BITS>>,
+            scan(Rest, [{Guid, TotalSize, Offset} | Acc], Offset + TotalSize);
+        _ ->
+            scan(Rest, Acc, Offset + TotalSize)
+    end;
+scan(Data, Acc, Offset) ->
+    {Data, Acc, Offset}.
diff -Nru rabbitmq-server-1.7.2/src/rabbit_msg_store.erl rabbitmq-server-2.3.1/src/rabbit_msg_store.erl
--- rabbitmq-server-1.7.2/src/rabbit_msg_store.erl 1970-01-01 00:00:00.000000000 +0000
+++ rabbitmq-server-2.3.1/src/rabbit_msg_store.erl 2011-02-03 12:47:35.000000000 +0000
@@ -0,0 +1,1958 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_msg_store). + +-behaviour(gen_server2). + +-export([start_link/4, successfully_recovered_state/1, + client_init/4, client_terminate/1, client_delete_and_terminate/1, + client_ref/1, close_all_indicated/1, + write/3, read/2, contains/2, remove/2, release/2, sync/3]). + +-export([sync/1, set_maximum_since_use/2, + has_readers/2, combine_files/3, delete_file/2]). %% internal + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). + +%%---------------------------------------------------------------------------- + +-include("rabbit_msg_store.hrl"). + +-define(SYNC_INTERVAL, 5). %% milliseconds +-define(CLEAN_FILENAME, "clean.dot"). +-define(FILE_SUMMARY_FILENAME, "file_summary.ets"). + +-define(BINARY_MODE, [raw, binary]). +-define(READ_MODE, [read]). +-define(READ_AHEAD_MODE, [read_ahead | ?READ_MODE]). +-define(WRITE_MODE, [write]). + +-define(FILE_EXTENSION, ".rdq"). +-define(FILE_EXTENSION_TMP, ".rdt"). + +-define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB + +%%---------------------------------------------------------------------------- + +-record(msstate, + { dir, %% store directory + index_module, %% the module for index ops + index_state, %% where are messages? + current_file, %% current file name as number + current_file_handle, %% current file handle since the last fsync? + file_handle_cache, %% file handle cache + on_sync, %% pending sync requests + sync_timer_ref, %% TRef for our interval timer + sum_valid_data, %% sum of valid data in all files + sum_file_size, %% sum of file sizes + pending_gc_completion, %% things to do once GC completes + gc_pid, %% pid of our GC + file_handles_ets, %% tid of the shared file handles table + file_summary_ets, %% tid of the file summary table + dedup_cache_ets, %% tid of dedup cache table + cur_file_cache_ets, %% tid of current file cache table + dying_clients, %% set of dying clients + clients, %% map of references of all registered clients + %% to callbacks + successfully_recovered, %% boolean: did we recover state? + file_size_limit, %% how big are our files allowed to get? + cref_to_guids %% client ref to synced messages mapping + }). + +-record(client_msstate, + { server, + client_ref, + file_handle_cache, + index_state, + index_module, + dir, + gc_pid, + file_handles_ets, + file_summary_ets, + dedup_cache_ets, + cur_file_cache_ets + }). + +-record(file_summary, + {file, valid_total_size, left, right, file_size, locked, readers}). + +-record(gc_state, + { dir, + index_module, + index_state, + file_summary_ets, + file_handles_ets, + msg_store + }). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([gc_state/0, file_num/0]). + +-type(gc_state() :: #gc_state { dir :: file:filename(), + index_module :: atom(), + index_state :: any(), + file_summary_ets :: ets:tid(), + file_handles_ets :: ets:tid(), + msg_store :: server() + }). + +-type(server() :: pid() | atom()). +-type(client_ref() :: binary()). +-type(file_num() :: non_neg_integer()). 
+-type(client_msstate() :: #client_msstate { + server :: server(), + client_ref :: client_ref(), + file_handle_cache :: dict(), + index_state :: any(), + index_module :: atom(), + dir :: file:filename(), + gc_pid :: pid(), + file_handles_ets :: ets:tid(), + file_summary_ets :: ets:tid(), + dedup_cache_ets :: ets:tid(), + cur_file_cache_ets :: ets:tid()}). +-type(startup_fun_state() :: + {(fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A})), + A}). +-type(maybe_guid_fun() :: 'undefined' | fun ((gb_set()) -> any())). +-type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')). +-type(deletion_thunk() :: fun (() -> boolean())). + +-spec(start_link/4 :: + (atom(), file:filename(), [binary()] | 'undefined', + startup_fun_state()) -> rabbit_types:ok_pid_or_error()). +-spec(successfully_recovered_state/1 :: (server()) -> boolean()). +-spec(client_init/4 :: (server(), client_ref(), maybe_guid_fun(), + maybe_close_fds_fun()) -> client_msstate()). +-spec(client_terminate/1 :: (client_msstate()) -> 'ok'). +-spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok'). +-spec(client_ref/1 :: (client_msstate()) -> client_ref()). +-spec(write/3 :: (rabbit_guid:guid(), msg(), client_msstate()) -> 'ok'). +-spec(read/2 :: (rabbit_guid:guid(), client_msstate()) -> + {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). +-spec(contains/2 :: (rabbit_guid:guid(), client_msstate()) -> boolean()). +-spec(remove/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). +-spec(release/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). +-spec(sync/3 :: ([rabbit_guid:guid()], fun (() -> any()), client_msstate()) -> + 'ok'). + +-spec(sync/1 :: (server()) -> 'ok'). +-spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok'). +-spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()). +-spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> + deletion_thunk()). +-spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). + +-endif. + +%%---------------------------------------------------------------------------- + +%% We run GC whenever (garbage / sum_file_size) > ?GARBAGE_FRACTION +%% It is not recommended to set this to < 0.5 +-define(GARBAGE_FRACTION, 0.5). + +%% The components: +%% +%% Index: this is a mapping from Guid to #msg_location{}: +%% {Guid, RefCount, File, Offset, TotalSize} +%% By default, it's in ets, but it's also pluggable. +%% FileSummary: this is an ets table which maps File to #file_summary{}: +%% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers} +%% +%% The basic idea is that messages are appended to the current file up +%% until that file becomes too big (> file_size_limit). At that point, +%% the file is closed and a new file is created on the _right_ of the +%% old file which is used for new messages. Files are named +%% numerically ascending, thus the file with the lowest name is the +%% eldest file. +%% +%% We need to keep track of which messages are in which files (this is +%% the Index); how much useful data is in each file and which files +%% are on the left and right of each other. This is the purpose of the +%% FileSummary ets table. +%% +%% As messages are removed from files, holes appear in these +%% files. The field ValidTotalSize contains the total amount of useful +%% data left in the file. This is needed for garbage collection. +%% +%% When we discover that a file is now empty, we delete it. 
+%% When we discover that it can be combined with the useful data in
+%% either its left or right neighbour, and overall, across all the
+%% files, we have ((the amount of garbage) / (the sum of all file
+%% sizes)) > ?GARBAGE_FRACTION, we start a garbage collection run
+%% concurrently, which will compact the two files together. This
+%% keeps disk utilisation high and aids performance. We deliberately
+%% do this lazily in order to prevent doing GC on files which are
+%% soon to be emptied (and hence deleted).
+%%
+%% Given the compaction between two files, the left file (i.e. elder
+%% file) is considered the ultimate destination for the good data in
+%% the right file. If necessary, the good data in the left file which
+%% is fragmented throughout the file is written out to a temporary
+%% file, then read back in to form a contiguous chunk of good data at
+%% the start of the left file. Thus the left file is garbage collected
+%% and compacted. Then the good data from the right file is copied
+%% onto the end of the left file. Index and FileSummary tables are
+%% updated.
+%%
+%% On non-clean startup, we scan the files we discover, dealing with
+%% the possibilities of a crash having occurred during a compaction
+%% (this consists of tidyup - the compaction is deliberately designed
+%% such that data is duplicated on disk rather than risking it being
+%% lost), and rebuild the FileSummary ets table and Index.
+%%
+%% So, with this design, messages move to the left. Eventually, they
+%% should end up in a contiguous block on the left and are then never
+%% rewritten. But this isn't quite the case. If in a file there is one
+%% message that is being ignored, for some reason, and messages in the
+%% file to the right and in the current block are being read all the
+%% time then it will repeatedly be the case that the good data from
+%% both files can be combined and will be written out to a new
+%% file. Whenever this happens, our shunned message will be rewritten.
+%%
+%% So, provided that we combine messages in the right order,
+%% (i.e. left file, bottom to top, right file, bottom to top),
+%% eventually our shunned message will end up at the bottom of the
+%% left file. The compaction/combining algorithm is smart enough to
+%% read in good data from the left file that is scattered throughout
+%% (i.e. C and D in the below diagram), then truncate the file to just
+%% above B (i.e. truncate to the limit of the good contiguous region
+%% at the start of the file), then write C and D on top and then write
+%% E, F and G from the right file on top. Thus contiguous blocks of
+%% good data at the bottom of files are not rewritten.
+%%
+%%        +-------+    +-------+         +-------+
+%%        |   X   |    |   G   |         |   G   |
+%%        +-------+    +-------+         +-------+
+%%        |   D   |    |   X   |         |   F   |
+%%        +-------+    +-------+         +-------+
+%%        |   X   |    |   X   |         |   E   |
+%%        +-------+    +-------+         +-------+
+%%        |   C   |    |   F   |   ===>  |   D   |
+%%        +-------+    +-------+         +-------+
+%%        |   X   |    |   X   |         |   C   |
+%%        +-------+    +-------+         +-------+
+%%        |   B   |    |   X   |         |   B   |
+%%        +-------+    +-------+         +-------+
+%%        |   A   |    |   E   |         |   A   |
+%%        +-------+    +-------+         +-------+
+%%          left          right            left
+%%
+%% From this reasoning, we do have a bound on the number of times the
+%% message is rewritten.
+%% From when it is inserted, there can be no
+%% files inserted between it and the head of the queue, and the worst
+%% case is that every time it is rewritten, it moves one position lower
+%% in the file (for it to stay at the same position requires that
+%% there are no holes beneath it, which means truncate would be used
+%% and so it would not be rewritten at all). Thus this seems to
+%% suggest the limit is the number of messages ahead of it in the
+%% queue, though it's likely that that's pessimistic, given the
+%% requirements for compaction/combination of files.
+%%
+%% The other property that we have is the bound on the lowest
+%% utilisation, which should be 50% - worst case is that all files are
+%% fractionally over half full and can't be combined (equivalent is
+%% alternating full files and files with only one tiny message in
+%% them).
+%%
+%% Messages are reference-counted. When a message with the same guid
+%% is written several times we only store it once, and only remove it
+%% from the store when it has been removed the same number of times.
+%%
+%% The reference counts do not persist. Therefore the initialisation
+%% function must be provided with a generator that produces ref count
+%% deltas for all recovered messages. This is only used on startup
+%% when the shutdown was non-clean.
+%%
+%% Read messages with a reference count greater than one are entered
+%% into a message cache. The purpose of the cache is not especially
+%% performance, though it can help there too, but prevention of memory
+%% explosion. It ensures that as messages with a high reference count
+%% are read from several processes they are read back as the same
+%% binary object rather than multiples of identical binary
+%% objects.
+%%
+%% Reads can be performed directly by clients without calling to the
+%% server. This is safe because multiple file handles can be used to
+%% read files. However, locking is used by the concurrent GC to make
+%% sure that reads are not attempted from files which are in the
+%% process of being garbage collected.
+%%
+%% When a message is removed, its reference count is decremented. Even
+%% if the reference count becomes 0, its entry is not removed. This is
+%% because in the event of the same message being sent to several
+%% different queues, there is the possibility of one queue writing and
+%% removing the message before other queues write it at all. Thus
+%% accommodating 0-reference counts allows us to avoid unnecessary
+%% writes here. Of course, there are complications: the file to which
+%% the message has already been written could be locked pending
+%% deletion or GC, which means we have to rewrite the message as the
+%% original copy will now be lost.
+%%
+%% The server automatically defers reads, removes and contains calls
+%% that occur which refer to files which are currently being
+%% GC'd. Contains calls are only deferred in order to ensure they do
+%% not overtake removes.
+%%
+%% The current file to which messages are being written has a
+%% write-back cache. This is written to immediately by clients and can
+%% be read from by clients too. This means that there are only ever
+%% writes made to the current file, thus eliminating delays due to
+%% flushing write buffers in order to be able to safely read from the
+%% current file. The one exception to this is that on start up, the
+%% cache is not populated with msgs found in the current file, and
+%% thus in this case only, reads may have to come from the file
+%% itself.
+%% The effect of this is that even if the msg_store process is
+%% heavily overloaded, clients can still write and read messages with
+%% very low latency and not block at all.
+%%
+%% Clients of the msg_store are required to register before using the
+%% msg_store. This provides them with the necessary client-side state
+%% to allow them to directly access the various caches and files. When
+%% they terminate, they should deregister. They can do this by calling
+%% either client_terminate/1 or client_delete_and_terminate/1. The
+%% differences are: (a) client_terminate is synchronous. As a result,
+%% if the msg_store is badly overloaded and has lots of in-flight
+%% writes and removes to process, this will take some time to
+%% return. However, once it does return, you can be sure that all the
+%% actions you've issued to the msg_store have been processed. (b) Not
+%% only is client_delete_and_terminate/1 asynchronous, but it also
+%% permits writes and subsequent removes from the current
+%% (terminating) client which are still in flight to be safely
+%% ignored. Thus from the point of view of the msg_store itself, and
+%% all from the same client:
+%%
+%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
+%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
+%%
+%% The client obviously sent T after all the other messages (up to
+%% W4), but because the msg_store prioritises messages, the T can be
+%% promoted and thus received early.
+%%
+%% Thus at the point of the msg_store receiving T, we have messages 1
+%% and 2 with a refcount of 1. After T, W3 will be ignored because
+%% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
+%% ignored because the messages that they refer to were already known
+%% to the msg_store prior to T. However, it can be a little more
+%% complex: after the first R2, the refcount of msg 2 is 0. At that
+%% point, if a GC occurs or file deletion, msg 2 could vanish, which
+%% would then mean that the subsequent W2 and R2 are then ignored.
+%%
+%% The use case then for client_delete_and_terminate/1 is if the
+%% client wishes to remove everything it's written to the msg_store:
+%% it issues removes for all messages it's written and not removed,
+%% and then calls client_delete_and_terminate/1. At that point, any
+%% in-flight writes (and subsequent removes) can be ignored, but
+%% removes and writes for messages the msg_store already knows about
+%% will continue to be processed normally (which will normally just
+%% involve modifying the reference count, which is fast). Thus we save
+%% disk bandwidth for writes which are going to be immediately removed
+%% again by the terminating client.
+%%
+%% We use a separate set to keep track of the dying clients in order
+%% to keep that set, which is inspected on every write and remove, as
+%% small as possible. Inspecting the set of all clients would degrade
+%% performance with many healthy clients and few, if any, dying
+%% clients, which is the typical case.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% variable_queue.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+start_link(Server, Dir, ClientRefs, StartupFunState) ->
+    gen_server2:start_link({local, Server}, ?MODULE,
+                           [Server, Dir, ClientRefs, StartupFunState],
+                           [{timeout, infinity}]).
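To make the client-side API described above concrete, a registered client session might look roughly like this (a hypothetical sketch, not part of the patch: the server name msg_store_persistent is assumed, the payload is an arbitrary term, and no message-on-disk or close-FDs callbacks are installed):

    Guid    = rabbit_guid:guid(),                 %% 16-byte message id
    Payload = {example_message, <<"hello">>},     %% any term will do
    CState0 = rabbit_msg_store:client_init(msg_store_persistent,
                                           rabbit_guid:guid(), %% client ref
                                           undefined, undefined),
    ok      = rabbit_msg_store:write(Guid, Payload, CState0),
    {{ok, Payload}, CState1} = rabbit_msg_store:read(Guid, CState0),
    ok      = rabbit_msg_store:remove([Guid], CState1),
    ok      = rabbit_msg_store:client_terminate(CState1).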
+ +successfully_recovered_state(Server) -> + gen_server2:call(Server, successfully_recovered_state, infinity). + +client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) -> + {IState, IModule, Dir, GCPid, + FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts} = + gen_server2:call( + Server, {new_client_state, Ref, MsgOnDiskFun, CloseFDsFun}, infinity), + #client_msstate { server = Server, + client_ref = Ref, + file_handle_cache = dict:new(), + index_state = IState, + index_module = IModule, + dir = Dir, + gc_pid = GCPid, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts }. + +client_terminate(CState = #client_msstate { client_ref = Ref }) -> + close_all_handles(CState), + ok = server_call(CState, {client_terminate, Ref}). + +client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) -> + close_all_handles(CState), + ok = server_cast(CState, {client_dying, Ref}), + ok = server_cast(CState, {client_delete, Ref}). + +client_ref(#client_msstate { client_ref = Ref }) -> Ref. + +write(Guid, Msg, + CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts, + client_ref = CRef }) -> + ok = update_msg_cache(CurFileCacheEts, Guid, Msg), + ok = server_cast(CState, {write, CRef, Guid}). + +read(Guid, + CState = #client_msstate { dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts }) -> + %% 1. Check the dedup cache + case fetch_and_increment_cache(DedupCacheEts, Guid) of + not_found -> + %% 2. Check the cur file cache + case ets:lookup(CurFileCacheEts, Guid) of + [] -> + Defer = fun() -> + {server_call(CState, {read, Guid}), CState} + end, + case index_lookup_positive_ref_count(Guid, CState) of + not_found -> Defer(); + MsgLocation -> client_read1(MsgLocation, Defer, CState) + end; + [{Guid, Msg, _CacheRefCount}] -> + %% Although we've found it, we don't know the + %% refcount, so can't insert into dedup cache + {{ok, Msg}, CState} + end; + Msg -> + {{ok, Msg}, CState} + end. + +contains(Guid, CState) -> server_call(CState, {contains, Guid}). +remove([], _CState) -> ok; +remove(Guids, CState = #client_msstate { client_ref = CRef }) -> + server_cast(CState, {remove, CRef, Guids}). +release([], _CState) -> ok; +release(Guids, CState) -> server_cast(CState, {release, Guids}). +sync(Guids, K, CState) -> server_cast(CState, {sync, Guids, K}). + +sync(Server) -> + gen_server2:cast(Server, sync). + +set_maximum_since_use(Server, Age) -> + gen_server2:cast(Server, {set_maximum_since_use, Age}). + +%%---------------------------------------------------------------------------- +%% Client-side-only helpers +%%---------------------------------------------------------------------------- + +server_call(#client_msstate { server = Server }, Msg) -> + gen_server2:call(Server, Msg, infinity). + +server_cast(#client_msstate { server = Server }, Msg) -> + gen_server2:cast(Server, Msg). + +client_read1(#msg_location { guid = Guid, file = File } = MsgLocation, Defer, + CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> + case ets:lookup(FileSummaryEts, File) of + [] -> %% File has been GC'd and no longer exists. Go around again. + read(Guid, CState); + [#file_summary { locked = Locked, right = Right }] -> + client_read2(Locked, Right, MsgLocation, Defer, CState) + end. 
+ +client_read2(false, undefined, _MsgLocation, Defer, _CState) -> + %% Although we've already checked both caches and not found the + %% message there, the message is apparently in the + %% current_file. We can only arrive here if we are trying to read + %% a message which we have not written, which is very odd, so just + %% defer. + %% + %% OR, on startup, the cur_file_cache is not populated with the + %% contents of the current file, thus reads from the current file + %% will end up here and will need to be deferred. + Defer(); +client_read2(true, _Right, _MsgLocation, Defer, _CState) -> + %% Of course, in the mean time, the GC could have run and our msg + %% is actually in a different file, unlocked. However, defering is + %% the safest and simplest thing to do. + Defer(); +client_read2(false, _Right, + MsgLocation = #msg_location { guid = Guid, file = File }, + Defer, + CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> + %% It's entirely possible that everything we're doing from here on + %% is for the wrong file, or a non-existent file, as a GC may have + %% finished. + safe_ets_update_counter( + FileSummaryEts, File, {#file_summary.readers, +1}, + fun (_) -> client_read3(MsgLocation, Defer, CState) end, + fun () -> read(Guid, CState) end). + +client_read3(#msg_location { guid = Guid, file = File }, Defer, + CState = #client_msstate { file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + gc_pid = GCPid, + client_ref = Ref }) -> + Release = + fun() -> ok = case ets:update_counter(FileSummaryEts, File, + {#file_summary.readers, -1}) of + 0 -> case ets:lookup(FileSummaryEts, File) of + [#file_summary { locked = true }] -> + rabbit_msg_store_gc:no_readers( + GCPid, File); + _ -> ok + end; + _ -> ok + end + end, + %% If a GC involving the file hasn't already started, it won't + %% start now. Need to check again to see if we've been locked in + %% the meantime, between lookup and update_counter (thus GC + %% started before our +1. In fact, it could have finished by now + %% too). + case ets:lookup(FileSummaryEts, File) of + [] -> %% GC has deleted our file, just go round again. + read(Guid, CState); + [#file_summary { locked = true }] -> + %% If we get a badarg here, then the GC has finished and + %% deleted our file. Try going around again. Otherwise, + %% just defer. + %% + %% badarg scenario: we lookup, msg_store locks, GC starts, + %% GC ends, we +1 readers, msg_store ets:deletes (and + %% unlocks the dest) + try Release(), + Defer() + catch error:badarg -> read(Guid, CState) + end; + [#file_summary { locked = false }] -> + %% Ok, we're definitely safe to continue - a GC involving + %% the file cannot start up now, and isn't running, so + %% nothing will tell us from now on to close the handle if + %% it's already open. + %% + %% Finally, we need to recheck that the msg is still at + %% the same place - it's possible an entire GC ran between + %% us doing the lookup and the +1 on the readers. (Same as + %% badarg scenario above, but we don't have a missing file + %% - we just have the /wrong/ file). + case index_lookup(Guid, CState) of + #msg_location { file = File } = MsgLocation -> + %% Still the same file. + {ok, CState1} = close_all_indicated(CState), + %% We are now guaranteed that the mark_handle_open + %% call will either insert_new correctly, or will + %% fail, but find the value is open, not close. + mark_handle_open(FileHandlesEts, File, Ref), + %% Could the msg_store now mark the file to be + %% closed? 
No: marks for closing are issued only + %% when the msg_store has locked the file. + {Msg, CState2} = %% This will never be the current file + read_from_disk(MsgLocation, CState1, DedupCacheEts), + Release(), %% this MUST NOT fail with badarg + {{ok, Msg}, CState2}; + #msg_location {} = MsgLocation -> %% different file! + Release(), %% this MUST NOT fail with badarg + client_read1(MsgLocation, Defer, CState); + not_found -> %% it seems not to exist. Defer, just to be sure. + try Release() %% this can badarg, same as locked case, above + catch error:badarg -> ok + end, + Defer() + end + end. + +clear_client(CRef, State = #msstate { cref_to_guids = CTG, + dying_clients = DyingClients }) -> + State #msstate { cref_to_guids = dict:erase(CRef, CTG), + dying_clients = sets:del_element(CRef, DyingClients) }. + + +%%---------------------------------------------------------------------------- +%% gen_server callbacks +%%---------------------------------------------------------------------------- + +init([Server, BaseDir, ClientRefs, StartupFunState]) -> + process_flag(trap_exit, true), + + ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use, + [self()]), + + Dir = filename:join(BaseDir, atom_to_list(Server)), + + {ok, IndexModule} = application:get_env(msg_store_index_module), + rabbit_log:info("~w: using ~p to provide index~n", [Server, IndexModule]), + + AttemptFileSummaryRecovery = + case ClientRefs of + undefined -> ok = rabbit_misc:recursive_delete([Dir]), + ok = filelib:ensure_dir(filename:join(Dir, "nothing")), + false; + _ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")), + recover_crashed_compactions(Dir) + end, + + %% if we found crashed compactions we trust neither the + %% file_summary nor the location index. Note the file_summary is + %% left empty here if it can't be recovered. + {FileSummaryRecovered, FileSummaryEts} = + recover_file_summary(AttemptFileSummaryRecovery, Dir), + + {CleanShutdown, IndexState, ClientRefs1} = + recover_index_and_client_refs(IndexModule, FileSummaryRecovered, + ClientRefs, Dir, Server), + Clients = dict:from_list( + [{CRef, {undefined, undefined}} || CRef <- ClientRefs1]), + %% CleanShutdown => msg location index and file_summary both + %% recovered correctly. + true = case {FileSummaryRecovered, CleanShutdown} of + {true, false} -> ets:delete_all_objects(FileSummaryEts); + _ -> true + end, + %% CleanShutdown <=> msg location index and file_summary both + %% recovered correctly. + + DedupCacheEts = ets:new(rabbit_msg_store_dedup_cache, [set, public]), + FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles, + [ordered_set, public]), + CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]), + + {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit), + + State = #msstate { dir = Dir, + index_module = IndexModule, + index_state = IndexState, + current_file = 0, + current_file_handle = undefined, + file_handle_cache = dict:new(), + on_sync = [], + sync_timer_ref = undefined, + sum_valid_data = 0, + sum_file_size = 0, + pending_gc_completion = orddict:new(), + gc_pid = undefined, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts, + dying_clients = sets:new(), + clients = Clients, + successfully_recovered = CleanShutdown, + file_size_limit = FileSizeLimit, + cref_to_guids = dict:new() + }, + + %% If we didn't recover the msg location index then we need to + %% rebuild it now. 
+ {Offset, State1 = #msstate { current_file = CurFile }} = + build_index(CleanShutdown, StartupFunState, State), + + %% read is only needed so that we can seek + {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile), + [read | ?WRITE_MODE]), + {ok, Offset} = file_handle_cache:position(CurHdl, Offset), + ok = file_handle_cache:truncate(CurHdl), + + {ok, GCPid} = rabbit_msg_store_gc:start_link( + #gc_state { dir = Dir, + index_module = IndexModule, + index_state = IndexState, + file_summary_ets = FileSummaryEts, + file_handles_ets = FileHandlesEts, + msg_store = self() + }), + + {ok, maybe_compact( + State1 #msstate { current_file_handle = CurHdl, gc_pid = GCPid }), + hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +prioritise_call(Msg, _From, _State) -> + case Msg of + successfully_recovered_state -> 7; + {new_client_state, _Ref, _MODC, _CloseFDsFun} -> 7; + {read, _Guid} -> 2; + _ -> 0 + end. + +prioritise_cast(Msg, _State) -> + case Msg of + sync -> 8; + {combine_files, _Source, _Destination, _Reclaimed} -> 8; + {delete_file, _File, _Reclaimed} -> 8; + {set_maximum_since_use, _Age} -> 8; + {client_dying, _Pid} -> 7; + _ -> 0 + end. + +handle_call(successfully_recovered_state, _From, State) -> + reply(State #msstate.successfully_recovered, State); + +handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, + State = #msstate { dir = Dir, + index_state = IndexState, + index_module = IndexModule, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts, + clients = Clients, + gc_pid = GCPid }) -> + Clients1 = dict:store(CRef, {MsgOnDiskFun, CloseFDsFun}, Clients), + reply({IndexState, IndexModule, Dir, GCPid, + FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts}, + State #msstate { clients = Clients1 }); + +handle_call({client_terminate, CRef}, _From, State) -> + reply(ok, clear_client(CRef, State)); + +handle_call({read, Guid}, From, State) -> + State1 = read_message(Guid, From, State), + noreply(State1); + +handle_call({contains, Guid}, From, State) -> + State1 = contains_message(Guid, From, State), + noreply(State1). 
+ +handle_cast({client_dying, CRef}, + State = #msstate { dying_clients = DyingClients }) -> + DyingClients1 = sets:add_element(CRef, DyingClients), + noreply(write_message(CRef, <<>>, + State #msstate { dying_clients = DyingClients1 })); + +handle_cast({client_delete, CRef}, State = #msstate { clients = Clients }) -> + State1 = State #msstate { clients = dict:erase(CRef, Clients) }, + noreply(remove_message(CRef, CRef, clear_client(CRef, State1))); + +handle_cast({write, CRef, Guid}, + State = #msstate { cur_file_cache_ets = CurFileCacheEts }) -> + true = 0 =< ets:update_counter(CurFileCacheEts, Guid, {3, -1}), + [{Guid, Msg, _CacheRefCount}] = ets:lookup(CurFileCacheEts, Guid), + noreply( + case write_action(should_mask_action(CRef, Guid, State), Guid, State) of + {write, State1} -> + write_message(CRef, Guid, Msg, State1); + {ignore, CurFile, State1 = #msstate { current_file = CurFile }} -> + State1; + {ignore, _File, State1} -> + true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + State1; + {confirm, CurFile, State1 = #msstate { current_file = CurFile }}-> + record_pending_confirm(CRef, Guid, State1); + {confirm, _File, State1} -> + true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + update_pending_confirms( + fun (MsgOnDiskFun, CTG) -> + MsgOnDiskFun(gb_sets:singleton(Guid), written), + CTG + end, CRef, State1) + end); + +handle_cast({remove, CRef, Guids}, State) -> + State1 = lists:foldl( + fun (Guid, State2) -> remove_message(Guid, CRef, State2) end, + State, Guids), + noreply(maybe_compact( + client_confirm(CRef, gb_sets:from_list(Guids), removed, State1))); + +handle_cast({release, Guids}, State = + #msstate { dedup_cache_ets = DedupCacheEts }) -> + lists:foreach( + fun (Guid) -> decrement_cache(DedupCacheEts, Guid) end, Guids), + noreply(State); + +handle_cast({sync, Guids, K}, + State = #msstate { current_file = CurFile, + current_file_handle = CurHdl, + on_sync = Syncs }) -> + {ok, SyncOffset} = file_handle_cache:last_sync_offset(CurHdl), + case lists:any(fun (Guid) -> + #msg_location { file = File, offset = Offset } = + index_lookup(Guid, State), + File =:= CurFile andalso Offset >= SyncOffset + end, Guids) of + false -> K(), + noreply(State); + true -> noreply(State #msstate { on_sync = [K | Syncs] }) + end; + +handle_cast(sync, State) -> + noreply(internal_sync(State)); + +handle_cast({combine_files, Source, Destination, Reclaimed}, + State = #msstate { sum_file_size = SumFileSize, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + clients = Clients }) -> + ok = cleanup_after_file_deletion(Source, State), + %% see comment in cleanup_after_file_deletion, and client_read3 + true = mark_handle_to_close(Clients, FileHandlesEts, Destination, false), + true = ets:update_element(FileSummaryEts, Destination, + {#file_summary.locked, false}), + State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed }, + noreply(maybe_compact(run_pending([Source, Destination], State1))); + +handle_cast({delete_file, File, Reclaimed}, + State = #msstate { sum_file_size = SumFileSize }) -> + ok = cleanup_after_file_deletion(File, State), + State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed }, + noreply(maybe_compact(run_pending([File], State1))); + +handle_cast({set_maximum_since_use, Age}, State) -> + ok = file_handle_cache:set_maximum_since_use(Age), + noreply(State). + +handle_info(timeout, State) -> + noreply(internal_sync(State)); + +handle_info({'EXIT', _Pid, Reason}, State) -> + {stop, Reason, State}. 
+ +terminate(_Reason, State = #msstate { index_state = IndexState, + index_module = IndexModule, + current_file_handle = CurHdl, + gc_pid = GCPid, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts, + clients = Clients, + dir = Dir }) -> + %% stop the gc first, otherwise it could be working and we pull + %% out the ets tables from under it. + ok = rabbit_msg_store_gc:stop(GCPid), + State1 = case CurHdl of + undefined -> State; + _ -> State2 = internal_sync(State), + file_handle_cache:close(CurHdl), + State2 + end, + State3 = close_all_handles(State1), + store_file_summary(FileSummaryEts, Dir), + [ets:delete(T) || + T <- [FileSummaryEts, DedupCacheEts, FileHandlesEts, CurFileCacheEts]], + IndexModule:terminate(IndexState), + store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, + {index_module, IndexModule}], Dir), + State3 #msstate { index_state = undefined, + current_file_handle = undefined }. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%---------------------------------------------------------------------------- +%% general helper functions +%%---------------------------------------------------------------------------- + +noreply(State) -> + {State1, Timeout} = next_state(State), + {noreply, State1, Timeout}. + +reply(Reply, State) -> + {State1, Timeout} = next_state(State), + {reply, Reply, State1, Timeout}. + +next_state(State = #msstate { sync_timer_ref = undefined, + on_sync = Syncs, + cref_to_guids = CTG }) -> + case {Syncs, dict:size(CTG)} of + {[], 0} -> {State, hibernate}; + _ -> {start_sync_timer(State), 0} + end; +next_state(State = #msstate { on_sync = Syncs, + cref_to_guids = CTG }) -> + case {Syncs, dict:size(CTG)} of + {[], 0} -> {stop_sync_timer(State), hibernate}; + _ -> {State, 0} + end. + +start_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> + {ok, TRef} = timer:apply_after(?SYNC_INTERVAL, ?MODULE, sync, [self()]), + State #msstate { sync_timer_ref = TRef }. + +stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> + State; +stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> + {ok, cancel} = timer:cancel(TRef), + State #msstate { sync_timer_ref = undefined }. + +internal_sync(State = #msstate { current_file_handle = CurHdl, + on_sync = Syncs, + cref_to_guids = CTG }) -> + State1 = stop_sync_timer(State), + CGs = dict:fold(fun (CRef, Guids, NS) -> + case gb_sets:is_empty(Guids) of + true -> NS; + false -> [{CRef, Guids} | NS] + end + end, [], CTG), + case {Syncs, CGs} of + {[], []} -> ok; + _ -> file_handle_cache:sync(CurHdl) + end, + [K() || K <- lists:reverse(Syncs)], + [client_confirm(CRef, Guids, written, State1) || {CRef, Guids} <- CGs], + State1 #msstate { cref_to_guids = dict:new(), on_sync = [] }. 
+ +write_action({true, not_found}, _Guid, State) -> + {ignore, undefined, State}; +write_action({true, #msg_location { file = File }}, _Guid, State) -> + {ignore, File, State}; +write_action({false, not_found}, _Guid, State) -> + {write, State}; +write_action({Mask, #msg_location { ref_count = 0, file = File, + total_size = TotalSize }}, + Guid, State = #msstate { file_summary_ets = FileSummaryEts }) -> + case {Mask, ets:lookup(FileSummaryEts, File)} of + {false, [#file_summary { locked = true }]} -> + ok = index_delete(Guid, State), + {write, State}; + {false_if_increment, [#file_summary { locked = true }]} -> + %% The msg for Guid is older than the client death + %% message, but as it is being GC'd currently we'll have + %% to write a new copy, which will then be younger, so + %% ignore this write. + {ignore, File, State}; + {_Mask, [#file_summary {}]} -> + ok = index_update_ref_count(Guid, 1, State), + State1 = adjust_valid_total_size(File, TotalSize, State), + {confirm, File, State1} + end; +write_action({_Mask, #msg_location { ref_count = RefCount, file = File }}, + Guid, State) -> + ok = index_update_ref_count(Guid, RefCount + 1, State), + %% We already know about it, just update counter. Only update + %% field otherwise bad interaction with concurrent GC + {confirm, File, State}. + +write_message(CRef, Guid, Msg, State) -> + write_message(Guid, Msg, record_pending_confirm(CRef, Guid, State)). + +write_message(Guid, Msg, + State = #msstate { current_file_handle = CurHdl, + current_file = CurFile, + sum_valid_data = SumValid, + sum_file_size = SumFileSize, + file_summary_ets = FileSummaryEts }) -> + {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl), + {ok, TotalSize} = rabbit_msg_file:append(CurHdl, Guid, Msg), + ok = index_insert( + #msg_location { guid = Guid, ref_count = 1, file = CurFile, + offset = CurOffset, total_size = TotalSize }, State), + [#file_summary { right = undefined, locked = false }] = + ets:lookup(FileSummaryEts, CurFile), + [_,_] = ets:update_counter(FileSummaryEts, CurFile, + [{#file_summary.valid_total_size, TotalSize}, + {#file_summary.file_size, TotalSize}]), + maybe_roll_to_new_file(CurOffset + TotalSize, + State #msstate { + sum_valid_data = SumValid + TotalSize, + sum_file_size = SumFileSize + TotalSize }). + +read_message(Guid, From, + State = #msstate { dedup_cache_ets = DedupCacheEts }) -> + case index_lookup_positive_ref_count(Guid, State) of + not_found -> + gen_server2:reply(From, not_found), + State; + MsgLocation -> + case fetch_and_increment_cache(DedupCacheEts, Guid) of + not_found -> read_message1(From, MsgLocation, State); + Msg -> gen_server2:reply(From, {ok, Msg}), + State + end + end. 
+ +read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, + file = File, offset = Offset } = MsgLoc, + State = #msstate { current_file = CurFile, + current_file_handle = CurHdl, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts }) -> + case File =:= CurFile of + true -> {Msg, State1} = + %% can return [] if msg in file existed on startup + case ets:lookup(CurFileCacheEts, Guid) of + [] -> + {ok, RawOffSet} = + file_handle_cache:current_raw_offset(CurHdl), + ok = case Offset >= RawOffSet of + true -> file_handle_cache:flush(CurHdl); + false -> ok + end, + read_from_disk(MsgLoc, State, DedupCacheEts); + [{Guid, Msg1, _CacheRefCount}] -> + ok = maybe_insert_into_cache( + DedupCacheEts, RefCount, Guid, Msg1), + {Msg1, State} + end, + gen_server2:reply(From, {ok, Msg}), + State1; + false -> [#file_summary { locked = Locked }] = + ets:lookup(FileSummaryEts, File), + case Locked of + true -> add_to_pending_gc_completion({read, Guid, From}, + File, State); + false -> {Msg, State1} = + read_from_disk(MsgLoc, State, DedupCacheEts), + gen_server2:reply(From, {ok, Msg}), + State1 + end + end. + +read_from_disk(#msg_location { guid = Guid, ref_count = RefCount, + file = File, offset = Offset, + total_size = TotalSize }, + State, DedupCacheEts) -> + {Hdl, State1} = get_read_handle(File, State), + {ok, Offset} = file_handle_cache:position(Hdl, Offset), + {ok, {Guid, Msg}} = + case rabbit_msg_file:read(Hdl, TotalSize) of + {ok, {Guid, _}} = Obj -> + Obj; + Rest -> + {error, {misread, [{old_state, State}, + {file_num, File}, + {offset, Offset}, + {guid, Guid}, + {read, Rest}, + {proc_dict, get()} + ]}} + end, + ok = maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg), + {Msg, State1}. + +contains_message(Guid, From, + State = #msstate { pending_gc_completion = Pending }) -> + case index_lookup_positive_ref_count(Guid, State) of + not_found -> + gen_server2:reply(From, false), + State; + #msg_location { file = File } -> + case orddict:is_key(File, Pending) of + true -> add_to_pending_gc_completion( + {contains, Guid, From}, File, State); + false -> gen_server2:reply(From, true), + State + end + end. + +remove_message(Guid, CRef, + State = #msstate { file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts }) -> + case should_mask_action(CRef, Guid, State) of + {true, _Location} -> + State; + {false_if_increment, #msg_location { ref_count = 0 }} -> + %% CRef has tried to both write and remove this msg + %% whilst it's being GC'd. ASSERTION: + %% [#file_summary { locked = true }] = + %% ets:lookup(FileSummaryEts, File), + State; + {_Mask, #msg_location { ref_count = RefCount, file = File, + total_size = TotalSize }} when RefCount > 0 -> + %% only update field, otherwise bad interaction with + %% concurrent GC + Dec = + fun () -> index_update_ref_count(Guid, RefCount - 1, State) end, + case RefCount of + %% don't remove from CUR_FILE_CACHE_ETS_NAME here + %% because there may be further writes in the mailbox + %% for the same msg. + 1 -> ok = remove_cache_entry(DedupCacheEts, Guid), + case ets:lookup(FileSummaryEts, File) of + [#file_summary { locked = true }] -> + add_to_pending_gc_completion( + {remove, Guid, CRef}, File, State); + [#file_summary {}] -> + ok = Dec(), + delete_file_if_empty( + File, adjust_valid_total_size(File, -TotalSize, + State)) + end; + _ -> ok = decrement_cache(DedupCacheEts, Guid), + ok = Dec(), + State + end + end. 
+ +add_to_pending_gc_completion( + Op, File, State = #msstate { pending_gc_completion = Pending }) -> + State #msstate { pending_gc_completion = + rabbit_misc:orddict_cons(File, Op, Pending) }. + +run_pending(Files, State) -> + lists:foldl( + fun (File, State1 = #msstate { pending_gc_completion = Pending }) -> + Pending1 = orddict:erase(File, Pending), + lists:foldl( + fun run_pending_action/2, + State1 #msstate { pending_gc_completion = Pending1 }, + lists:reverse(orddict:fetch(File, Pending))) + end, State, Files). + +run_pending_action({read, Guid, From}, State) -> + read_message(Guid, From, State); +run_pending_action({contains, Guid, From}, State) -> + contains_message(Guid, From, State); +run_pending_action({remove, Guid, CRef}, State) -> + remove_message(Guid, CRef, State). + +safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) -> + try + SuccessFun(ets:update_counter(Tab, Key, UpdateOp)) + catch error:badarg -> FailThunk() + end. + +safe_ets_update_counter_ok(Tab, Key, UpdateOp, FailThunk) -> + safe_ets_update_counter(Tab, Key, UpdateOp, fun (_) -> ok end, FailThunk). + +adjust_valid_total_size(File, Delta, State = #msstate { + sum_valid_data = SumValid, + file_summary_ets = FileSummaryEts }) -> + [_] = ets:update_counter(FileSummaryEts, File, + [{#file_summary.valid_total_size, Delta}]), + State #msstate { sum_valid_data = SumValid + Delta }. + +orddict_store(Key, Val, Dict) -> + false = orddict:is_key(Key, Dict), + orddict:store(Key, Val, Dict). + +update_pending_confirms(Fun, CRef, State = #msstate { clients = Clients, + cref_to_guids = CTG }) -> + case dict:fetch(CRef, Clients) of + {undefined, _CloseFDsFun} -> State; + {MsgOnDiskFun, _CloseFDsFun} -> CTG1 = Fun(MsgOnDiskFun, CTG), + State #msstate { cref_to_guids = CTG1 } + end. + +record_pending_confirm(CRef, Guid, State) -> + update_pending_confirms( + fun (_MsgOnDiskFun, CTG) -> + dict:update(CRef, fun (Guids) -> gb_sets:add(Guid, Guids) end, + gb_sets:singleton(Guid), CTG) + end, CRef, State). + +client_confirm(CRef, Guids, ActionTaken, State) -> + update_pending_confirms( + fun (MsgOnDiskFun, CTG) -> + MsgOnDiskFun(Guids, ActionTaken), + case dict:find(CRef, CTG) of + {ok, Gs} -> Guids1 = gb_sets:difference(Gs, Guids), + case gb_sets:is_empty(Guids1) of + true -> dict:erase(CRef, CTG); + false -> dict:store(CRef, Guids1, CTG) + end; + error -> CTG + end + end, CRef, State). + +%% Detect whether the Guid is older or younger than the client's death +%% msg (if there is one). If the msg is older than the client death +%% msg, and it has a 0 ref_count we must only alter the ref_count, not +%% rewrite the msg - rewriting it would make it younger than the death +%% msg and thus should be ignored. Note that this (correctly) returns +%% false when testing to remove the death msg itself. +should_mask_action(CRef, Guid, + State = #msstate { dying_clients = DyingClients }) -> + case {sets:is_element(CRef, DyingClients), index_lookup(Guid, State)} of + {false, Location} -> + {false, Location}; + {true, not_found} -> + {true, not_found}; + {true, #msg_location { file = File, offset = Offset, + ref_count = RefCount } = Location} -> + #msg_location { file = DeathFile, offset = DeathOffset } = + index_lookup(CRef, State), + {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of + {true, _} -> true; + {false, 0} -> false_if_increment; + {false, _} -> false + end, Location} + end. 
+ +%%---------------------------------------------------------------------------- +%% file helper functions +%%---------------------------------------------------------------------------- + +open_file(Dir, FileName, Mode) -> + file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode, + [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]). + +close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) -> + CState #client_msstate { file_handle_cache = close_handle(Key, FHC) }; + +close_handle(Key, State = #msstate { file_handle_cache = FHC }) -> + State #msstate { file_handle_cache = close_handle(Key, FHC) }; + +close_handle(Key, FHC) -> + case dict:find(Key, FHC) of + {ok, Hdl} -> ok = file_handle_cache:close(Hdl), + dict:erase(Key, FHC); + error -> FHC + end. + +mark_handle_open(FileHandlesEts, File, Ref) -> + %% This is fine to fail (already exists). Note it could fail with + %% the value being close, and not have it updated to open. + ets:insert_new(FileHandlesEts, {{Ref, File}, open}), + true. + +%% See comment in client_read3 - only call this when the file is locked +mark_handle_to_close(ClientRefs, FileHandlesEts, File, Invoke) -> + [ begin + case (ets:update_element(FileHandlesEts, Key, {2, close}) + andalso Invoke) of + true -> case dict:fetch(Ref, ClientRefs) of + {_MsgOnDiskFun, undefined} -> ok; + {_MsgOnDiskFun, CloseFDsFun} -> ok = CloseFDsFun() + end; + false -> ok + end + end || {{Ref, _File} = Key, open} <- + ets:match_object(FileHandlesEts, {{'_', File}, open}) ], + true. + +safe_file_delete_fun(File, Dir, FileHandlesEts) -> + fun () -> safe_file_delete(File, Dir, FileHandlesEts) end. + +safe_file_delete(File, Dir, FileHandlesEts) -> + %% do not match on any value - it's the absence of the row that + %% indicates the client has really closed the file. + case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of + {[_|_], _Cont} -> false; + _ -> ok = file:delete( + form_filename(Dir, filenum_to_name(File))), + true + end. + +close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts, + client_ref = Ref } = + CState) -> + Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}), + {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) -> + true = ets:delete(FileHandlesEts, Key), + close_handle(File, CStateM) + end, CState, Objs)}. + +close_all_handles(CState = #client_msstate { file_handles_ets = FileHandlesEts, + file_handle_cache = FHC, + client_ref = Ref }) -> + ok = dict:fold(fun (File, Hdl, ok) -> + true = ets:delete(FileHandlesEts, {Ref, File}), + file_handle_cache:close(Hdl) + end, ok, FHC), + CState #client_msstate { file_handle_cache = dict:new() }; + +close_all_handles(State = #msstate { file_handle_cache = FHC }) -> + ok = dict:fold(fun (_Key, Hdl, ok) -> file_handle_cache:close(Hdl) end, + ok, FHC), + State #msstate { file_handle_cache = dict:new() }. + +get_read_handle(FileNum, CState = #client_msstate { file_handle_cache = FHC, + dir = Dir }) -> + {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir), + {Hdl, CState #client_msstate { file_handle_cache = FHC2 }}; + +get_read_handle(FileNum, State = #msstate { file_handle_cache = FHC, + dir = Dir }) -> + {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir), + {Hdl, State #msstate { file_handle_cache = FHC2 }}. + +get_read_handle(FileNum, FHC, Dir) -> + case dict:find(FileNum, FHC) of + {ok, Hdl} -> {Hdl, FHC}; + error -> {ok, Hdl} = open_file(Dir, filenum_to_name(FileNum), + ?READ_MODE), + {Hdl, dict:store(FileNum, Hdl, FHC)} + end. 
+ +preallocate(Hdl, FileSizeLimit, FinalPos) -> + {ok, FileSizeLimit} = file_handle_cache:position(Hdl, FileSizeLimit), + ok = file_handle_cache:truncate(Hdl), + {ok, FinalPos} = file_handle_cache:position(Hdl, FinalPos), + ok. + +truncate_and_extend_file(Hdl, Lowpoint, Highpoint) -> + {ok, Lowpoint} = file_handle_cache:position(Hdl, Lowpoint), + ok = file_handle_cache:truncate(Hdl), + ok = preallocate(Hdl, Highpoint, Lowpoint). + +form_filename(Dir, Name) -> filename:join(Dir, Name). + +filenum_to_name(File) -> integer_to_list(File) ++ ?FILE_EXTENSION. + +filename_to_num(FileName) -> list_to_integer(filename:rootname(FileName)). + +list_sorted_file_names(Dir, Ext) -> + lists:sort(fun (A, B) -> filename_to_num(A) < filename_to_num(B) end, + filelib:wildcard("*" ++ Ext, Dir)). + +%%---------------------------------------------------------------------------- +%% message cache helper functions +%%---------------------------------------------------------------------------- + +maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg) + when RefCount > 1 -> + update_msg_cache(DedupCacheEts, Guid, Msg); +maybe_insert_into_cache(_DedupCacheEts, _RefCount, _Guid, _Msg) -> + ok. + +update_msg_cache(CacheEts, Guid, Msg) -> + case ets:insert_new(CacheEts, {Guid, Msg, 1}) of + true -> ok; + false -> safe_ets_update_counter_ok( + CacheEts, Guid, {3, +1}, + fun () -> update_msg_cache(CacheEts, Guid, Msg) end) + end. + +remove_cache_entry(DedupCacheEts, Guid) -> + true = ets:delete(DedupCacheEts, Guid), + ok. + +fetch_and_increment_cache(DedupCacheEts, Guid) -> + case ets:lookup(DedupCacheEts, Guid) of + [] -> + not_found; + [{_Guid, Msg, _RefCount}] -> + safe_ets_update_counter_ok( + DedupCacheEts, Guid, {3, +1}, + %% someone has deleted us in the meantime, insert us + fun () -> ok = update_msg_cache(DedupCacheEts, Guid, Msg) end), + Msg + end. + +decrement_cache(DedupCacheEts, Guid) -> + true = safe_ets_update_counter( + DedupCacheEts, Guid, {3, -1}, + fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, Guid); + (_N) -> true + end, + %% Guid is not in there because although it's been + %% delivered, it's never actually been read (think: + %% persistent message held in RAM) + fun () -> true end), + ok. + +%%---------------------------------------------------------------------------- +%% index +%%---------------------------------------------------------------------------- + +index_lookup_positive_ref_count(Key, State) -> + case index_lookup(Key, State) of + not_found -> not_found; + #msg_location { ref_count = 0 } -> not_found; + #msg_location {} = MsgLocation -> MsgLocation + end. + +index_update_ref_count(Key, RefCount, State) -> + index_update_fields(Key, {#msg_location.ref_count, RefCount}, State). + +index_lookup(Key, #client_msstate { index_module = Index, + index_state = State }) -> + Index:lookup(Key, State); + +index_lookup(Key, #msstate { index_module = Index, index_state = State }) -> + Index:lookup(Key, State). + +index_insert(Obj, #msstate { index_module = Index, index_state = State }) -> + Index:insert(Obj, State). + +index_update(Obj, #msstate { index_module = Index, index_state = State }) -> + Index:update(Obj, State). + +index_update_fields(Key, Updates, #msstate { index_module = Index, + index_state = State }) -> + Index:update_fields(Key, Updates, State). + +index_delete(Key, #msstate { index_module = Index, index_state = State }) -> + Index:delete(Key, State). 
+ +index_delete_by_file(File, #msstate { index_module = Index, + index_state = State }) -> + Index:delete_by_file(File, State). + +%%---------------------------------------------------------------------------- +%% shutdown and recovery +%%---------------------------------------------------------------------------- + +recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Server) -> + {false, IndexModule:new(Dir), []}; +recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Server) -> + rabbit_log:warning("~w: rebuilding indices from scratch~n", [Server]), + {false, IndexModule:new(Dir), []}; +recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Server) -> + Fresh = fun (ErrorMsg, ErrorArgs) -> + rabbit_log:warning("~w: " ++ ErrorMsg ++ "~n" + "rebuilding indices from scratch~n", + [Server | ErrorArgs]), + {false, IndexModule:new(Dir), []} + end, + case read_recovery_terms(Dir) of + {false, Error} -> + Fresh("failed to read recovery terms: ~p", [Error]); + {true, Terms} -> + RecClientRefs = proplists:get_value(client_refs, Terms, []), + RecIndexModule = proplists:get_value(index_module, Terms), + case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs) + andalso IndexModule =:= RecIndexModule) of + true -> case IndexModule:recover(Dir) of + {ok, IndexState1} -> + {true, IndexState1, ClientRefs}; + {error, Error} -> + Fresh("failed to recover index: ~p", [Error]) + end; + false -> Fresh("recovery terms differ from present", []) + end + end. + +store_recovery_terms(Terms, Dir) -> + rabbit_misc:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms). + +read_recovery_terms(Dir) -> + Path = filename:join(Dir, ?CLEAN_FILENAME), + case rabbit_misc:read_term_file(Path) of + {ok, Terms} -> case file:delete(Path) of + ok -> {true, Terms}; + {error, Error} -> {false, Error} + end; + {error, Error} -> {false, Error} + end. + +store_file_summary(Tid, Dir) -> + ok = ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME), + [{extended_info, [object_count]}]). + +recover_file_summary(false, _Dir) -> + %% TODO: the only reason for this to be an *ordered*_set is so + %% that a) maybe_compact can start a traversal from the eldest + %% file, and b) build_index in fast recovery mode can easily + %% identify the current file. It's awkward to have both that + %% odering and the left/right pointers in the entries - replacing + %% the former with some additional bit of state would be easy, but + %% ditching the latter would be neater. + {false, ets:new(rabbit_msg_store_file_summary, + [ordered_set, public, {keypos, #file_summary.file}])}; +recover_file_summary(true, Dir) -> + Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME), + case ets:file2tab(Path) of + {ok, Tid} -> file:delete(Path), + {true, Tid}; + {error, _Error} -> recover_file_summary(false, Dir) + end. + +count_msg_refs(Gen, Seed, State) -> + case Gen(Seed) of + finished -> + ok; + {_Guid, 0, Next} -> + count_msg_refs(Gen, Next, State); + {Guid, Delta, Next} -> + ok = case index_lookup(Guid, State) of + not_found -> + index_insert(#msg_location { guid = Guid, + file = undefined, + ref_count = Delta }, + State); + #msg_location { ref_count = RefCount } = StoreEntry -> + NewRefCount = RefCount + Delta, + case NewRefCount of + 0 -> index_delete(Guid, State); + _ -> index_update(StoreEntry #msg_location { + ref_count = NewRefCount }, + State) + end + end, + count_msg_refs(Gen, Next, State) + end. 
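+
+%% If a compaction was interrupted by a crash there may be leftover
+%% tmp files; each is appended back onto its corresponding main file
+%% (see recover_crashed_compaction/3 below). Returns true iff there
+%% were no tmp files to recover.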
+ +recover_crashed_compactions(Dir) -> + FileNames = list_sorted_file_names(Dir, ?FILE_EXTENSION), + TmpFileNames = list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP), + lists:foreach( + fun (TmpFileName) -> + NonTmpRelatedFileName = + filename:rootname(TmpFileName) ++ ?FILE_EXTENSION, + true = lists:member(NonTmpRelatedFileName, FileNames), + ok = recover_crashed_compaction( + Dir, TmpFileName, NonTmpRelatedFileName) + end, TmpFileNames), + TmpFileNames == []. + +recover_crashed_compaction(Dir, TmpFileName, NonTmpRelatedFileName) -> + %% Because a msg can legitimately appear multiple times in the + %% same file, identifying the contents of the tmp file and where + %% they came from is non-trivial. If we are recovering a crashed + %% compaction then we will be rebuilding the index, which can cope + %% with duplicates appearing. Thus the simplest and safest thing + %% to do is to append the contents of the tmp file to its main + %% file. + {ok, TmpHdl} = open_file(Dir, TmpFileName, ?READ_MODE), + {ok, MainHdl} = open_file(Dir, NonTmpRelatedFileName, + ?READ_MODE ++ ?WRITE_MODE), + {ok, _End} = file_handle_cache:position(MainHdl, eof), + Size = filelib:file_size(form_filename(Dir, TmpFileName)), + {ok, Size} = file_handle_cache:copy(TmpHdl, MainHdl, Size), + ok = file_handle_cache:close(MainHdl), + ok = file_handle_cache:delete(TmpHdl), + ok. + +scan_file_for_valid_messages(Dir, FileName) -> + case open_file(Dir, FileName, ?READ_MODE) of + {ok, Hdl} -> Valid = rabbit_msg_file:scan( + Hdl, filelib:file_size( + form_filename(Dir, FileName))), + %% if something really bad has happened, + %% the close could fail, but ignore + file_handle_cache:close(Hdl), + Valid; + {error, enoent} -> {ok, [], 0}; + {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} + end. + +%% Takes the list in *ascending* order (i.e. eldest message +%% first). This is the opposite of what scan_file_for_valid_messages +%% produces. The list of msgs that is produced is youngest first. +drop_contiguous_block_prefix(L) -> drop_contiguous_block_prefix(L, 0). + +drop_contiguous_block_prefix([], ExpectedOffset) -> + {ExpectedOffset, []}; +drop_contiguous_block_prefix([#msg_location { offset = ExpectedOffset, + total_size = TotalSize } | Tail], + ExpectedOffset) -> + ExpectedOffset1 = ExpectedOffset + TotalSize, + drop_contiguous_block_prefix(Tail, ExpectedOffset1); +drop_contiguous_block_prefix(MsgsAfterGap, ExpectedOffset) -> + {ExpectedOffset, MsgsAfterGap}. + +build_index(true, _StartupFunState, + State = #msstate { file_summary_ets = FileSummaryEts }) -> + ets:foldl( + fun (#file_summary { valid_total_size = ValidTotalSize, + file_size = FileSize, + file = File }, + {_Offset, State1 = #msstate { sum_valid_data = SumValid, + sum_file_size = SumFileSize }}) -> + {FileSize, State1 #msstate { + sum_valid_data = SumValid + ValidTotalSize, + sum_file_size = SumFileSize + FileSize, + current_file = File }} + end, {0, State}, FileSummaryEts); +build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, + State = #msstate { dir = Dir }) -> + ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State), + {ok, Pid} = gatherer:start_link(), + case [filename_to_num(FileName) || + FileName <- list_sorted_file_names(Dir, ?FILE_EXTENSION)] of + [] -> build_index(Pid, undefined, [State #msstate.current_file], + State); + Files -> {Offset, State1} = build_index(Pid, undefined, Files, State), + {Offset, lists:foldl(fun delete_file_if_empty/2, + State1, Files)} + end. 
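+
+%% Each file is scanned by a worker from the worker pool and the
+%% resulting per-file summaries are collected via the gatherer. The
+%% clause with an empty file list drains the gatherer, inserts the
+%% summaries into the file summary table and returns the size of the
+%% right-most file, which becomes the current file.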
+ +build_index(Gatherer, Left, [], + State = #msstate { file_summary_ets = FileSummaryEts, + sum_valid_data = SumValid, + sum_file_size = SumFileSize }) -> + case gatherer:out(Gatherer) of + empty -> + ok = gatherer:stop(Gatherer), + ok = rabbit_misc:unlink_and_capture_exit(Gatherer), + ok = index_delete_by_file(undefined, State), + Offset = case ets:lookup(FileSummaryEts, Left) of + [] -> 0; + [#file_summary { file_size = FileSize }] -> FileSize + end, + {Offset, State #msstate { current_file = Left }}; + {value, #file_summary { valid_total_size = ValidTotalSize, + file_size = FileSize } = FileSummary} -> + true = ets:insert_new(FileSummaryEts, FileSummary), + build_index(Gatherer, Left, [], + State #msstate { + sum_valid_data = SumValid + ValidTotalSize, + sum_file_size = SumFileSize + FileSize }) + end; +build_index(Gatherer, Left, [File|Files], State) -> + ok = gatherer:fork(Gatherer), + ok = worker_pool:submit_async( + fun () -> build_index_worker(Gatherer, State, + Left, File, Files) + end), + build_index(Gatherer, File, Files, State). + +build_index_worker(Gatherer, State = #msstate { dir = Dir }, + Left, File, Files) -> + {ok, Messages, FileSize} = + scan_file_for_valid_messages(Dir, filenum_to_name(File)), + {ValidMessages, ValidTotalSize} = + lists:foldl( + fun (Obj = {Guid, TotalSize, Offset}, {VMAcc, VTSAcc}) -> + case index_lookup(Guid, State) of + #msg_location { file = undefined } = StoreEntry -> + ok = index_update(StoreEntry #msg_location { + file = File, offset = Offset, + total_size = TotalSize }, + State), + {[Obj | VMAcc], VTSAcc + TotalSize}; + _ -> + {VMAcc, VTSAcc} + end + end, {[], 0}, Messages), + {Right, FileSize1} = + case Files of + %% if it's the last file, we'll truncate to remove any + %% rubbish above the last valid message. This affects the + %% file size. + [] -> {undefined, case ValidMessages of + [] -> 0; + _ -> {_Guid, TotalSize, Offset} = + lists:last(ValidMessages), + Offset + TotalSize + end}; + [F|_] -> {F, FileSize} + end, + ok = gatherer:in(Gatherer, #file_summary { + file = File, + valid_total_size = ValidTotalSize, + left = Left, + right = Right, + file_size = FileSize1, + locked = false, + readers = 0 }), + ok = gatherer:finish(Gatherer). + +%%---------------------------------------------------------------------------- +%% garbage collection / compaction / aggregation -- internal +%%---------------------------------------------------------------------------- + +maybe_roll_to_new_file( + Offset, + State = #msstate { dir = Dir, + current_file_handle = CurHdl, + current_file = CurFile, + file_summary_ets = FileSummaryEts, + cur_file_cache_ets = CurFileCacheEts, + file_size_limit = FileSizeLimit }) + when Offset >= FileSizeLimit -> + State1 = internal_sync(State), + ok = file_handle_cache:close(CurHdl), + NextFile = CurFile + 1, + {ok, NextHdl} = open_file(Dir, filenum_to_name(NextFile), ?WRITE_MODE), + true = ets:insert_new(FileSummaryEts, #file_summary { + file = NextFile, + valid_total_size = 0, + left = CurFile, + right = undefined, + file_size = 0, + locked = false, + readers = 0 }), + true = ets:update_element(FileSummaryEts, CurFile, + {#file_summary.right, NextFile}), + true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}), + maybe_compact(State1 #msstate { current_file_handle = NextHdl, + current_file = NextFile }); +maybe_roll_to_new_file(_, State) -> + State. 
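+
+%% Compaction is only considered once the total file size exceeds
+%% twice the file size limit and the garbage fraction exceeds
+%% ?GARBAGE_FRACTION; an adjacent pair of unlocked, non-empty files
+%% whose combined valid data fits within the limit is then handed to
+%% the GC to combine.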
+ +maybe_compact(State = #msstate { sum_valid_data = SumValid, + sum_file_size = SumFileSize, + gc_pid = GCPid, + pending_gc_completion = Pending, + file_summary_ets = FileSummaryEts, + file_size_limit = FileSizeLimit }) + when (SumFileSize > 2 * FileSizeLimit andalso + (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION) -> + %% TODO: the algorithm here is sub-optimal - it may result in a + %% complete traversal of FileSummaryEts. + case ets:first(FileSummaryEts) of + '$end_of_table' -> + State; + First -> + case find_files_to_combine(FileSummaryEts, FileSizeLimit, + ets:lookup(FileSummaryEts, First)) of + not_found -> + State; + {Src, Dst} -> + Pending1 = orddict_store(Dst, [], + orddict_store(Src, [], Pending)), + State1 = close_handle(Src, close_handle(Dst, State)), + true = ets:update_element(FileSummaryEts, Src, + {#file_summary.locked, true}), + true = ets:update_element(FileSummaryEts, Dst, + {#file_summary.locked, true}), + ok = rabbit_msg_store_gc:combine(GCPid, Src, Dst), + State1 #msstate { pending_gc_completion = Pending1 } + end + end; +maybe_compact(State) -> + State. + +find_files_to_combine(FileSummaryEts, FileSizeLimit, + [#file_summary { file = Dst, + valid_total_size = DstValid, + right = Src, + locked = DstLocked }]) -> + case Src of + undefined -> + not_found; + _ -> + [#file_summary { file = Src, + valid_total_size = SrcValid, + left = Dst, + right = SrcRight, + locked = SrcLocked }] = Next = + ets:lookup(FileSummaryEts, Src), + case SrcRight of + undefined -> not_found; + _ -> case (DstValid + SrcValid =< FileSizeLimit) andalso + (DstValid > 0) andalso (SrcValid > 0) andalso + not (DstLocked orelse SrcLocked) of + true -> {Src, Dst}; + false -> find_files_to_combine( + FileSummaryEts, FileSizeLimit, Next) + end + end + end. + +delete_file_if_empty(File, State = #msstate { current_file = File }) -> + State; +delete_file_if_empty(File, State = #msstate { + gc_pid = GCPid, + file_summary_ets = FileSummaryEts, + pending_gc_completion = Pending }) -> + [#file_summary { valid_total_size = ValidData, + locked = false }] = + ets:lookup(FileSummaryEts, File), + case ValidData of + 0 -> %% don't delete the file_summary_ets entry for File here + %% because we could have readers which need to be able to + %% decrement the readers count. + true = ets:update_element(FileSummaryEts, File, + {#file_summary.locked, true}), + ok = rabbit_msg_store_gc:delete(GCPid, File), + Pending1 = orddict_store(File, [], Pending), + close_handle(File, + State #msstate { pending_gc_completion = Pending1 }); + _ -> State + end. + +cleanup_after_file_deletion(File, + #msstate { file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + clients = Clients }) -> + %% Ensure that any clients that have open fhs to the file close + %% them before using them again. This has to be done here (given + %% it's done in the msg_store, and not the gc), and not when + %% starting up the GC, because if done when starting up the GC, + %% the client could find the close, and close and reopen the fh, + %% whilst the GC is waiting for readers to disappear, before it's + %% actually done the GC. 
+ true = mark_handle_to_close(Clients, FileHandlesEts, File, true), + [#file_summary { left = Left, + right = Right, + locked = true, + readers = 0 }] = ets:lookup(FileSummaryEts, File), + %% We'll never delete the current file, so right is never undefined + true = Right =/= undefined, %% ASSERTION + true = ets:update_element(FileSummaryEts, Right, + {#file_summary.left, Left}), + %% ensure the double linked list is maintained + true = case Left of + undefined -> true; %% File is the eldest file (left-most) + _ -> ets:update_element(FileSummaryEts, Left, + {#file_summary.right, Right}) + end, + true = ets:delete(FileSummaryEts, File), + ok. + +%%---------------------------------------------------------------------------- +%% garbage collection / compaction / aggregation -- external +%%---------------------------------------------------------------------------- + +has_readers(File, #gc_state { file_summary_ets = FileSummaryEts }) -> + [#file_summary { locked = true, readers = Count }] = + ets:lookup(FileSummaryEts, File), + Count /= 0. + +combine_files(Source, Destination, + State = #gc_state { file_summary_ets = FileSummaryEts, + file_handles_ets = FileHandlesEts, + dir = Dir, + msg_store = Server }) -> + [#file_summary { + readers = 0, + left = Destination, + valid_total_size = SourceValid, + file_size = SourceFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Source), + [#file_summary { + readers = 0, + right = Source, + valid_total_size = DestinationValid, + file_size = DestinationFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Destination), + + SourceName = filenum_to_name(Source), + DestinationName = filenum_to_name(Destination), + {ok, SourceHdl} = open_file(Dir, SourceName, + ?READ_AHEAD_MODE), + {ok, DestinationHdl} = open_file(Dir, DestinationName, + ?READ_AHEAD_MODE ++ ?WRITE_MODE), + TotalValidData = SourceValid + DestinationValid, + %% if DestinationValid =:= DestinationContiguousTop then we don't + %% need a tmp file + %% if they're not equal, then we need to write out everything past + %% the DestinationContiguousTop to a tmp file then truncate, + %% copy back in, and then copy over from Source + %% otherwise we just truncate straight away and copy over from Source + {DestinationWorkList, DestinationValid} = + load_and_vacuum_message_file(Destination, State), + {DestinationContiguousTop, DestinationWorkListTail} = + drop_contiguous_block_prefix(DestinationWorkList), + case DestinationWorkListTail of + [] -> ok = truncate_and_extend_file( + DestinationHdl, DestinationContiguousTop, TotalValidData); + _ -> Tmp = filename:rootname(DestinationName) ++ ?FILE_EXTENSION_TMP, + {ok, TmpHdl} = open_file(Dir, Tmp, ?READ_AHEAD_MODE++?WRITE_MODE), + ok = copy_messages( + DestinationWorkListTail, DestinationContiguousTop, + DestinationValid, DestinationHdl, TmpHdl, Destination, + State), + TmpSize = DestinationValid - DestinationContiguousTop, + %% so now Tmp contains everything we need to salvage + %% from Destination, and index_state has been updated to + %% reflect the compaction of Destination so truncate + %% Destination and copy from Tmp back to the end + {ok, 0} = file_handle_cache:position(TmpHdl, 0), + ok = truncate_and_extend_file( + DestinationHdl, DestinationContiguousTop, TotalValidData), + {ok, TmpSize} = + file_handle_cache:copy(TmpHdl, DestinationHdl, TmpSize), + %% position in DestinationHdl should now be DestinationValid + ok = file_handle_cache:sync(DestinationHdl), + ok = file_handle_cache:delete(TmpHdl) + end, + {SourceWorkList, SourceValid} = 
load_and_vacuum_message_file(Source, State), + ok = copy_messages(SourceWorkList, DestinationValid, TotalValidData, + SourceHdl, DestinationHdl, Destination, State), + %% tidy up + ok = file_handle_cache:close(DestinationHdl), + ok = file_handle_cache:close(SourceHdl), + + %% don't update dest.right, because it could be changing at the + %% same time + true = ets:update_element( + FileSummaryEts, Destination, + [{#file_summary.valid_total_size, TotalValidData}, + {#file_summary.file_size, TotalValidData}]), + + Reclaimed = SourceFileSize + DestinationFileSize - TotalValidData, + gen_server2:cast(Server, {combine_files, Source, Destination, Reclaimed}), + safe_file_delete_fun(Source, Dir, FileHandlesEts). + +delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, + file_handles_ets = FileHandlesEts, + dir = Dir, + msg_store = Server }) -> + [#file_summary { valid_total_size = 0, + locked = true, + file_size = FileSize, + readers = 0 }] = ets:lookup(FileSummaryEts, File), + {[], 0} = load_and_vacuum_message_file(File, State), + gen_server2:cast(Server, {delete_file, File, FileSize}), + safe_file_delete_fun(File, Dir, FileHandlesEts). + +load_and_vacuum_message_file(File, #gc_state { dir = Dir, + index_module = Index, + index_state = IndexState }) -> + %% Messages here will be end-of-file at start-of-list + {ok, Messages, _FileSize} = + scan_file_for_valid_messages(Dir, filenum_to_name(File)), + %% foldl will reverse so will end up with msgs in ascending offset order + lists:foldl( + fun ({Guid, TotalSize, Offset}, Acc = {List, Size}) -> + case Index:lookup(Guid, IndexState) of + #msg_location { file = File, total_size = TotalSize, + offset = Offset, ref_count = 0 } = Entry -> + ok = Index:delete_object(Entry, IndexState), + Acc; + #msg_location { file = File, total_size = TotalSize, + offset = Offset } = Entry -> + {[ Entry | List ], TotalSize + Size}; + _ -> + Acc + end + end, {[], 0}, Messages). + +copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, + Destination, #gc_state { index_module = Index, + index_state = IndexState }) -> + Copy = fun ({BlockStart, BlockEnd}) -> + BSize = BlockEnd - BlockStart, + {ok, BlockStart} = + file_handle_cache:position(SourceHdl, BlockStart), + {ok, BSize} = + file_handle_cache:copy(SourceHdl, DestinationHdl, BSize) + end, + case + lists:foldl( + fun (#msg_location { guid = Guid, offset = Offset, + total_size = TotalSize }, + {CurOffset, Block = {BlockStart, BlockEnd}}) -> + %% CurOffset is in the DestinationFile. + %% Offset, BlockStart and BlockEnd are in the SourceFile + %% update MsgLocation to reflect change of file and offset + ok = Index:update_fields(Guid, + [{#msg_location.file, Destination}, + {#msg_location.offset, CurOffset}], + IndexState), + {CurOffset + TotalSize, + case BlockEnd of + undefined -> + %% base case, called only for the first list elem + {Offset, Offset + TotalSize}; + Offset -> + %% extend the current block because the + %% next msg follows straight on + {BlockStart, BlockEnd + TotalSize}; + _ -> + %% found a gap, so actually do the work for + %% the previous block + Copy(Block), + {Offset, Offset + TotalSize} + end} + end, {InitOffset, {undefined, undefined}}, WorkList) of + {FinalOffset, Block} -> + case WorkList of + [] -> ok; + _ -> Copy(Block), %% do the last remaining block + ok = file_handle_cache:sync(DestinationHdl) + end; + {FinalOffsetZ, _Block} -> + {gc_error, [{expected, FinalOffset}, + {got, FinalOffsetZ}, + {destination, Destination}]} + end. 
diff -Nru rabbitmq-server-1.7.2/src/rabbit_msg_store_ets_index.erl rabbitmq-server-2.3.1/src/rabbit_msg_store_ets_index.erl --- rabbitmq-server-1.7.2/src/rabbit_msg_store_ets_index.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_msg_store_ets_index.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,79 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_msg_store_ets_index). + +-behaviour(rabbit_msg_store_index). + +-export([new/1, recover/1, + lookup/2, insert/2, update/2, update_fields/3, delete/2, + delete_object/2, delete_by_file/2, terminate/1]). + +-define(MSG_LOC_NAME, rabbit_msg_store_ets_index). +-define(FILENAME, "msg_store_index.ets"). + +-include("rabbit_msg_store_index.hrl"). + +-record(state, { table, dir }). + +new(Dir) -> + file:delete(filename:join(Dir, ?FILENAME)), + Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.guid}]), + #state { table = Tid, dir = Dir }. + +recover(Dir) -> + Path = filename:join(Dir, ?FILENAME), + case ets:file2tab(Path) of + {ok, Tid} -> file:delete(Path), + {ok, #state { table = Tid, dir = Dir }}; + Error -> Error + end. + +lookup(Key, State) -> + case ets:lookup(State #state.table, Key) of + [] -> not_found; + [Entry] -> Entry + end. + +insert(Obj, State) -> + true = ets:insert_new(State #state.table, Obj), + ok. + +update(Obj, State) -> + true = ets:insert(State #state.table, Obj), + ok. + +update_fields(Key, Updates, State) -> + true = ets:update_element(State #state.table, Key, Updates), + ok. + +delete(Key, State) -> + true = ets:delete(State #state.table, Key), + ok. + +delete_object(Obj, State) -> + true = ets:delete_object(State #state.table, Obj), + ok. + +delete_by_file(File, State) -> + MatchHead = #msg_location { file = File, _ = '_' }, + ets:select_delete(State #state.table, [{MatchHead, [], [true]}]), + ok. + +terminate(#state { table = MsgLocations, dir = Dir }) -> + ok = ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME), + [{extended_info, [object_count]}]), + ets:delete(MsgLocations). diff -Nru rabbitmq-server-1.7.2/src/rabbit_msg_store_gc.erl rabbitmq-server-2.3.1/src/rabbit_msg_store_gc.erl --- rabbitmq-server-1.7.2/src/rabbit_msg_store_gc.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_msg_store_gc.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,137 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. 
+%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_msg_store_gc). + +-behaviour(gen_server2). + +-export([start_link/1, combine/3, delete/2, no_readers/2, stop/1]). + +-export([set_maximum_since_use/2]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3, prioritise_cast/2]). + +-record(state, + { pending_no_readers, + on_action, + msg_store_state + }). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/1 :: (rabbit_msg_store:gc_state()) -> + rabbit_types:ok_pid_or_error()). +-spec(combine/3 :: (pid(), rabbit_msg_store:file_num(), + rabbit_msg_store:file_num()) -> 'ok'). +-spec(delete/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok'). +-spec(no_readers/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok'). +-spec(stop/1 :: (pid()) -> 'ok'). +-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link(MsgStoreState) -> + gen_server2:start_link(?MODULE, [MsgStoreState], + [{timeout, infinity}]). + +combine(Server, Source, Destination) -> + gen_server2:cast(Server, {combine, Source, Destination}). + +delete(Server, File) -> + gen_server2:cast(Server, {delete, File}). + +no_readers(Server, File) -> + gen_server2:cast(Server, {no_readers, File}). + +stop(Server) -> + gen_server2:call(Server, stop, infinity). + +set_maximum_since_use(Pid, Age) -> + gen_server2:cast(Pid, {set_maximum_since_use, Age}). + +%%---------------------------------------------------------------------------- + +init([MsgStoreState]) -> + ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use, + [self()]), + {ok, #state { pending_no_readers = dict:new(), + on_action = [], + msg_store_state = MsgStoreState }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +prioritise_cast({set_maximum_since_use, _Age}, _State) -> 8; +prioritise_cast(_Msg, _State) -> 0. + +handle_call(stop, _From, State) -> + {stop, normal, ok, State}. + +handle_cast({combine, Source, Destination}, State) -> + {noreply, attempt_action(combine, [Source, Destination], State), hibernate}; + +handle_cast({delete, File}, State) -> + {noreply, attempt_action(delete, [File], State), hibernate}; + +handle_cast({no_readers, File}, + State = #state { pending_no_readers = Pending }) -> + {noreply, case dict:find(File, Pending) of + error -> + State; + {ok, {Action, Files}} -> + Pending1 = dict:erase(File, Pending), + attempt_action( + Action, Files, + State #state { pending_no_readers = Pending1 }) + end, hibernate}; + +handle_cast({set_maximum_since_use, Age}, State) -> + ok = file_handle_cache:set_maximum_since_use(Age), + {noreply, State, hibernate}. + +handle_info(Info, State) -> + {stop, {unhandled_info, Info}, State}. + +terminate(_Reason, State) -> + State. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
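+
+%% A combine or delete can only proceed once there are no remaining
+%% readers of any of the files involved; otherwise the action is
+%% parked in pending_no_readers and retried when a no_readers
+%% notification arrives for the blocking file.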
+ +attempt_action(Action, Files, + State = #state { pending_no_readers = Pending, + on_action = Thunks, + msg_store_state = MsgStoreState }) -> + case [File || File <- Files, + rabbit_msg_store:has_readers(File, MsgStoreState)] of + [] -> State #state { + on_action = lists:filter( + fun (Thunk) -> not Thunk() end, + [do_action(Action, Files, MsgStoreState) | + Thunks]) }; + [File | _] -> Pending1 = dict:store(File, {Action, Files}, Pending), + State #state { pending_no_readers = Pending1 } + end. + +do_action(combine, [Source, Destination], MsgStoreState) -> + rabbit_msg_store:combine_files(Source, Destination, MsgStoreState); +do_action(delete, [File], MsgStoreState) -> + rabbit_msg_store:delete_file(File, MsgStoreState). diff -Nru rabbitmq-server-1.7.2/src/rabbit_msg_store_index.erl rabbitmq-server-2.3.1/src/rabbit_msg_store_index.erl --- rabbitmq-server-1.7.2/src/rabbit_msg_store_index.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_msg_store_index.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,32 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_msg_store_index). + +-export([behaviour_info/1]). + +behaviour_info(callbacks) -> + [{new, 1}, + {recover, 1}, + {lookup, 2}, + {insert, 2}, + {update, 2}, + {update_fields, 3}, + {delete, 2}, + {delete_by_file, 2}, + {terminate, 1}]; +behaviour_info(_Other) -> + undefined. diff -Nru rabbitmq-server-1.7.2/src/rabbit_multi.erl rabbitmq-server-2.3.1/src/rabbit_multi.erl --- rabbitmq-server-1.7.2/src/rabbit_multi.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_multi.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_multi). @@ -42,6 +27,7 @@ -spec(start/0 :: () -> no_return()). -spec(stop/0 :: () -> 'ok'). +-spec(usage/0 :: () -> no_return()). -endif. @@ -51,7 +37,7 @@ RpcTimeout = case init:get_argument(maxwait) of {ok,[[N1]]} -> 1000 * list_to_integer(N1); - _ -> 30000 + _ -> ?MAX_WAIT end, case init:get_plain_arguments() of [] -> @@ -63,20 +49,19 @@ io:format("done.~n"), halt(); {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> - error("invalid command '~s'", - [lists:flatten( - rabbit_misc:intersperse(" ", FullCommand))]), + print_error("invalid command '~s'", + [string:join(FullCommand, " ")]), usage(); timeout -> - error("timeout starting some nodes.", []), + print_error("timeout starting some nodes.", []), halt(1); Other -> - error("~p", [Other]), + print_error("~p", [Other]), halt(2) end end. -error(Format, Args) -> +print_error(Format, Args) -> rabbit_misc:format_stderr("Error: " ++ Format ++ "~n", Args). parse_args([Command | Args]) -> @@ -86,21 +71,20 @@ ok. usage() -> - io:format("Usage: rabbitmq-multi - -Available commands: - - start_all - start a local cluster of RabbitMQ nodes. - status - print status of all running nodes - stop_all - stops all local RabbitMQ nodes. - rotate_logs [Suffix] - rotate logs for all local and running RabbitMQ nodes. -"), - halt(3). + io:format("~s", [rabbit_multi_usage:usage()]), + halt(1). 
action(start_all, [NodeCount], RpcTimeout) -> io:format("Starting all nodes...~n", []), application:load(rabbit), - NodeName = rabbit_misc:nodeparts(getenv("RABBITMQ_NODENAME")), + {_NodeNamePrefix, NodeHost} = NodeName = rabbit_misc:nodeparts( + getenv("RABBITMQ_NODENAME")), + case net_adm:names(NodeHost) of + {error, EpmdReason} -> + throw({cannot_connect_to_epmd, NodeHost, EpmdReason}); + {ok, _} -> + ok + end, {NodePids, Running} = case list_to_integer(NodeCount) of 1 -> {NodePid, Started} = start_node(rabbit_misc:makenode(NodeName), @@ -118,7 +102,7 @@ action(status, [], RpcTimeout) -> io:format("Status of all running nodes...~n", []), call_all_nodes( - fun({Node, Pid}) -> + fun ({Node, Pid}) -> RabbitRunning = case is_rabbit_running(Node, RpcTimeout) of false -> not_running; @@ -130,7 +114,7 @@ action(stop_all, [], RpcTimeout) -> io:format("Stopping all nodes...~n", []), - call_all_nodes(fun({Node, Pid}) -> + call_all_nodes(fun ({Node, Pid}) -> io:format("Stopping node ~p~n", [Node]), rpc:call(Node, rabbit, stop_and_halt, []), case kill_wait(Pid, RpcTimeout, false) of @@ -227,11 +211,11 @@ run_rabbitmq_server_win32() -> Cmd = filename:nativename(os:find_executable("cmd")), - CmdLine = "\"" ++ getenv("RABBITMQ_SCRIPT_HOME") - ++ "\\rabbitmq-server.bat\" -noinput", + CmdLine = "\"" ++ getenv("RABBITMQ_SCRIPT_HOME") ++ + "\\rabbitmq-server.bat\" -noinput -detached", erlang:open_port({spawn_executable, Cmd}, [{arg0, Cmd}, {args, ["/q", "/s", "/c", CmdLine]}, - nouse_stdio, hide]). + nouse_stdio]). is_rabbit_running(Node, RpcTimeout) -> case rpc:call(Node, rabbit, status, [], RpcTimeout) of @@ -310,18 +294,28 @@ is_dead(Pid) -> PidS = integer_to_list(Pid), with_os([{unix, fun () -> - Res = os:cmd("ps --no-headers --pid " ++ PidS), - Res == "" + system("kill -0 " ++ PidS + ++ " >/dev/null 2>&1") /= 0 end}, {win32, fun () -> Res = os:cmd("tasklist /nh /fi \"pid eq " ++ - PidS ++ "\""), - case regexp:first_match(Res, "erl.exe") of - {match, _, _} -> false; - _ -> true + PidS ++ "\" 2>&1"), + case re:run(Res, "erl\\.exe", [{capture, none}]) of + match -> false; + _ -> true end end}]). +% Like system(3) +system(Cmd) -> + ShCmd = "sh -c '" ++ escape_quotes(Cmd) ++ "'", + Port = erlang:open_port({spawn, ShCmd}, [exit_status,nouse_stdio]), + receive {Port, {exit_status, Status}} -> Status end. + +% Escape the quotes in a shell command so that it can be used in "sh -c 'cmd'" +escape_quotes(Cmd) -> + lists:flatten(lists:map(fun ($') -> "'\\''"; (Ch) -> Ch end, Cmd)). + call_all_nodes(Func) -> case read_pids_file() of [] -> throw(no_nodes_running); @@ -342,6 +336,8 @@ case application:get_env(rabbit, tcp_listeners) of {ok, [{_IpAddy, _Port} = Listener]} -> Listener; + {ok, [Port]} when is_number(Port) -> + {"0.0.0.0", Port}; {ok, []} -> undefined; {ok, Other} -> diff -Nru rabbitmq-server-1.7.2/src/rabbit_net.erl rabbitmq-server-2.3.1/src/rabbit_net.erl --- rabbitmq-server-1.7.2/src/rabbit_net.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_net.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,132 +1,119 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_net). -include("rabbit.hrl"). --include_lib("kernel/include/inet.hrl"). --export([async_recv/3, close/1, controlling_process/2, - getstat/2, peername/1, port_command/2, - send/2, sockname/1]). +-export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2, + async_recv/3, port_command/2, send/2, close/1, + sockname/1, peername/1, peercert/1]). + %%--------------------------------------------------------------------------- -ifdef(use_specs). +-export_type([socket/0]). + -type(stat_option() :: 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' | 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'). --type(error() :: {'error', any()}). - --spec(async_recv/3 :: (socket(), integer(), timeout()) -> {'ok', any()}). --spec(close/1 :: (socket()) -> 'ok' | error()). --spec(controlling_process/2 :: (socket(), pid()) -> 'ok' | error()). +-type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())). +-type(ok_or_any_error() :: rabbit_types:ok_or_error(any())). +-type(socket() :: port() | #ssl_socket{}). + +-spec(is_ssl/1 :: (socket()) -> boolean()). +-spec(ssl_info/1 :: (socket()) + -> 'nossl' | ok_val_or_error( + {atom(), {atom(), atom(), atom()}})). +-spec(controlling_process/2 :: (socket(), pid()) -> ok_or_any_error()). +-spec(getstat/2 :: + (socket(), [stat_option()]) + -> ok_val_or_error([{stat_option(), integer()}])). +-spec(async_recv/3 :: + (socket(), integer(), timeout()) -> rabbit_types:ok(any())). -spec(port_command/2 :: (socket(), iolist()) -> 'true'). --spec(send/2 :: (socket(), binary() | iolist()) -> 'ok' | error()). --spec(peername/1 :: (socket()) -> - {'ok', {ip_address(), non_neg_integer()}} | error()). --spec(sockname/1 :: (socket()) -> - {'ok', {ip_address(), non_neg_integer()}} | error()). --spec(getstat/2 :: (socket(), [stat_option()]) -> - {'ok', [{stat_option(), integer()}]} | error()). +-spec(send/2 :: (socket(), binary() | iolist()) -> ok_or_any_error()). 
+-spec(close/1 :: (socket()) -> ok_or_any_error()). +-spec(sockname/1 :: + (socket()) + -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})). +-spec(peername/1 :: + (socket()) + -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})). +-spec(peercert/1 :: + (socket()) + -> 'nossl' | ok_val_or_error(rabbit_ssl:certificate())). -endif. %%--------------------------------------------------------------------------- +-define(IS_SSL(Sock), is_record(Sock, ssl_socket)). -async_recv(Sock, Length, Timeout) when is_record(Sock, ssl_socket) -> - Pid = self(), - Ref = make_ref(), - - spawn(fun() -> Pid ! {inet_async, Sock, Ref, - ssl:recv(Sock#ssl_socket.ssl, Length, Timeout)} - end), - - {ok, Ref}; +is_ssl(Sock) -> ?IS_SSL(Sock). -async_recv(Sock, Length, infinity) when is_port(Sock) -> - prim_inet:async_recv(Sock, Length, -1); +ssl_info(Sock) when ?IS_SSL(Sock) -> + ssl:connection_info(Sock#ssl_socket.ssl); +ssl_info(_Sock) -> + nossl. -async_recv(Sock, Length, Timeout) when is_port(Sock) -> - prim_inet:async_recv(Sock, Length, Timeout). - -close(Sock) when is_record(Sock, ssl_socket) -> - ssl:close(Sock#ssl_socket.ssl); - -close(Sock) when is_port(Sock) -> - gen_tcp:close(Sock). - - -controlling_process(Sock, Pid) when is_record(Sock, ssl_socket) -> +controlling_process(Sock, Pid) when ?IS_SSL(Sock) -> ssl:controlling_process(Sock#ssl_socket.ssl, Pid); - controlling_process(Sock, Pid) when is_port(Sock) -> gen_tcp:controlling_process(Sock, Pid). - -getstat(Sock, Stats) when is_record(Sock, ssl_socket) -> +getstat(Sock, Stats) when ?IS_SSL(Sock) -> inet:getstat(Sock#ssl_socket.tcp, Stats); - getstat(Sock, Stats) when is_port(Sock) -> inet:getstat(Sock, Stats). +async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) -> + Pid = self(), + Ref = make_ref(), -peername(Sock) when is_record(Sock, ssl_socket) -> - ssl:peername(Sock#ssl_socket.ssl); - -peername(Sock) when is_port(Sock) -> - inet:peername(Sock). + spawn(fun () -> Pid ! {inet_async, Sock, Ref, + ssl:recv(Sock#ssl_socket.ssl, Length, Timeout)} + end), + {ok, Ref}; +async_recv(Sock, Length, infinity) when is_port(Sock) -> + prim_inet:async_recv(Sock, Length, -1); +async_recv(Sock, Length, Timeout) when is_port(Sock) -> + prim_inet:async_recv(Sock, Length, Timeout). -port_command(Sock, Data) when is_record(Sock, ssl_socket) -> +port_command(Sock, Data) when ?IS_SSL(Sock) -> case ssl:send(Sock#ssl_socket.ssl, Data) of - ok -> - self() ! {inet_reply, Sock, ok}, - true; - {error, Reason} -> - erlang:error(Reason) + ok -> self() ! {inet_reply, Sock, ok}, + true; + {error, Reason} -> erlang:error(Reason) end; - port_command(Sock, Data) when is_port(Sock) -> erlang:port_command(Sock, Data). -send(Sock, Data) when is_record(Sock, ssl_socket) -> - ssl:send(Sock#ssl_socket.ssl, Data); +send(Sock, Data) when ?IS_SSL(Sock) -> ssl:send(Sock#ssl_socket.ssl, Data); +send(Sock, Data) when is_port(Sock) -> gen_tcp:send(Sock, Data). -send(Sock, Data) when is_port(Sock) -> - gen_tcp:send(Sock, Data). +close(Sock) when ?IS_SSL(Sock) -> ssl:close(Sock#ssl_socket.ssl); +close(Sock) when is_port(Sock) -> gen_tcp:close(Sock). +sockname(Sock) when ?IS_SSL(Sock) -> ssl:sockname(Sock#ssl_socket.ssl); +sockname(Sock) when is_port(Sock) -> inet:sockname(Sock). -sockname(Sock) when is_record(Sock, ssl_socket) -> - ssl:sockname(Sock#ssl_socket.ssl); +peername(Sock) when ?IS_SSL(Sock) -> ssl:peername(Sock#ssl_socket.ssl); +peername(Sock) when is_port(Sock) -> inet:peername(Sock). -sockname(Sock) when is_port(Sock) -> - inet:sockname(Sock). 
+peercert(Sock) when ?IS_SSL(Sock) -> ssl:peercert(Sock#ssl_socket.ssl); +peercert(Sock) when is_port(Sock) -> nossl. diff -Nru rabbitmq-server-1.7.2/src/rabbit_networking.erl rabbitmq-server-2.3.1/src/rabbit_networking.erl --- rabbitmq-server-1.7.2/src/rabbit_networking.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_networking.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,47 +1,32 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% All Rights Reserved. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Contributor(s): ______________________________________. +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_networking). --export([boot/0, start/0, start_tcp_listener/2, start_ssl_listener/3, - stop_tcp_listener/2, on_node_down/1, active_listeners/0, +-export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2, + stop_tcp_listener/1, on_node_down/1, active_listeners/0, node_listeners/1, connections/0, connection_info_keys/0, connection_info/1, connection_info/2, connection_info_all/0, connection_info_all/1, close_connection/2]). %%used by TCP-based transports, e.g. STOMP adapter --export([check_tcp_listener_address/3]). +-export([check_tcp_listener_address/2]). --export([tcp_listener_started/2, tcp_listener_stopped/2, +-export([tcp_listener_started/3, tcp_listener_stopped/3, start_client/1, start_ssl_client/2]). -include("rabbit.hrl"). @@ -51,6 +36,7 @@ binary, {packet, raw}, % no packaging {reuseaddr, true}, % allow rebind without waiting + {backlog, 128}, % use the maximum listen(2) backlog value %% {nodelay, true}, % TCP_NODELAY - disable Nagle's alg. %% {delay_send, true}, {exit_on_close, false} @@ -58,29 +44,40 @@ -define(SSL_TIMEOUT, 5). %% seconds +-define(FIRST_TEST_BIND_PORT, 10000). 
+ %%---------------------------------------------------------------------------- -ifdef(use_specs). --type(host() :: ip_address() | string() | atom()). --type(connection() :: pid()). +-export_type([ip_port/0, hostname/0]). + +-type(family() :: atom()). +-type(listener_config() :: ip_port() | + {hostname(), ip_port()} | + {hostname(), ip_port(), family()}). -spec(start/0 :: () -> 'ok'). --spec(start_tcp_listener/2 :: (host(), ip_port()) -> 'ok'). --spec(start_ssl_listener/3 :: (host(), ip_port(), [info()]) -> 'ok'). --spec(stop_tcp_listener/2 :: (host(), ip_port()) -> 'ok'). --spec(active_listeners/0 :: () -> [listener()]). --spec(node_listeners/1 :: (erlang_node()) -> [listener()]). --spec(connections/0 :: () -> [connection()]). --spec(connection_info_keys/0 :: () -> [info_key()]). --spec(connection_info/1 :: (connection()) -> [info()]). --spec(connection_info/2 :: (connection(), [info_key()]) -> [info()]). --spec(connection_info_all/0 :: () -> [[info()]]). --spec(connection_info_all/1 :: ([info_key()]) -> [[info()]]). +-spec(start_tcp_listener/1 :: (listener_config()) -> 'ok'). +-spec(start_ssl_listener/2 :: + (listener_config(), rabbit_types:infos()) -> 'ok'). +-spec(stop_tcp_listener/1 :: (listener_config()) -> 'ok'). +-spec(active_listeners/0 :: () -> [rabbit_types:listener()]). +-spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]). +-spec(connections/0 :: () -> [rabbit_types:connection()]). +-spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(connection_info/1 :: + (rabbit_types:connection()) -> rabbit_types:infos()). +-spec(connection_info/2 :: + (rabbit_types:connection(), rabbit_types:info_keys()) + -> rabbit_types:infos()). +-spec(connection_info_all/0 :: () -> [rabbit_types:infos()]). +-spec(connection_info_all/1 :: + (rabbit_types:info_keys()) -> [rabbit_types:infos()]). -spec(close_connection/2 :: (pid(), string()) -> 'ok'). --spec(on_node_down/1 :: (erlang_node()) -> 'ok'). --spec(check_tcp_listener_address/3 :: (atom(), host(), ip_port()) -> - {ip_address(), atom()}). +-spec(on_node_down/1 :: (node()) -> 'ok'). +-spec(check_tcp_listener_address/2 :: (atom(), listener_config()) + -> [{inet:ip_address(), ip_port(), family(), atom()}]). -endif. @@ -93,7 +90,7 @@ boot_tcp() -> {ok, TcpListeners} = application:get_env(tcp_listeners), - [ok = start_tcp_listener(Host, Port) || {Host, Port} <- TcpListeners], + [ok = start_tcp_listener(Listener) || Listener <- TcpListeners], ok. boot_ssl() -> @@ -101,82 +98,143 @@ {ok, []} -> ok; {ok, SslListeners} -> - ok = rabbit_misc:start_applications([crypto, ssl]), - {ok, SslOpts} = application:get_env(ssl_options), - [start_ssl_listener(Host, Port, SslOpts) || {Host, Port} <- SslListeners], + ok = rabbit_misc:start_applications([crypto, public_key, ssl]), + {ok, SslOptsConfig} = application:get_env(ssl_options), + % unknown_ca errors are silently ignored prior to R14B unless we + % supply this verify_fun - remove when at least R14B is required + SslOpts = + case proplists:get_value(verify, SslOptsConfig, verify_none) of + verify_none -> SslOptsConfig; + verify_peer -> [{verify_fun, fun([]) -> true; + ([_|_]) -> false + end} + | SslOptsConfig] + end, + [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners], ok end. 
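The verify_fun workaround above is easier to follow with a concrete configuration. Below is a minimal sketch, assuming hypothetical certificate paths; it mirrors the transformation boot_ssl/0 performs when the configured verify mode is verify_peer:

    %% ssl_options as it might appear in the application environment
    %% (paths are placeholders, not taken from any real deployment)
    SslOptsConfig = [{cacertfile, "/path/to/cacert.pem"},
                     {certfile,   "/path/to/cert.pem"},
                     {keyfile,    "/path/to/key.pem"},
                     {verify,     verify_peer}],
    %% boot_ssl/0 then prepends the pre-R14B workaround fun, so the options
    %% handed to start_ssl_listener/2 become:
    SslOpts = [{verify_fun, fun ([])    -> true;   % no certificate path errors: accept
                                ([_|_]) -> false   % e.g. unknown_ca: reject
                            end}
               | SslOptsConfig].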
start() -> - {ok,_} = supervisor:start_child( + {ok,_} = supervisor2:start_child( rabbit_sup, {rabbit_tcp_client_sup, - {tcp_client_sup, start_link, + {rabbit_client_sup, start_link, [{local, rabbit_tcp_client_sup}, - {rabbit_reader,start_link,[]}]}, - transient, infinity, supervisor, [tcp_client_sup]}), + {rabbit_connection_sup,start_link,[]}]}, + transient, infinity, supervisor, [rabbit_client_sup]}), ok. -check_tcp_listener_address(NamePrefix, Host, Port) -> - IPAddress = - case inet:getaddr(Host, inet) of - {ok, IPAddress1} -> IPAddress1; - {error, Reason} -> - error_logger:error_msg("invalid host ~p - ~p~n", - [Host, Reason]), - throw({error, {invalid_host, Host, Reason}}) - end, +%% inet_parse:address takes care of ip string, like "0.0.0.0" +%% inet:getaddr returns immediately for ip tuple {0,0,0,0}, +%% and runs 'inet_gethost' port process for dns lookups. +%% On Windows inet:getaddr runs dns resolver for ip string, which may fail. + +getaddr(Host, Family) -> + case inet_parse:address(Host) of + {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}]; + {error, _} -> gethostaddr(Host, Family) + end. + +gethostaddr(Host, auto) -> + Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]], + case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of + [] -> host_lookup_error(Host, Lookups); + IPs -> IPs + end; + +gethostaddr(Host, Family) -> + case inet:getaddr(Host, Family) of + {ok, IPAddress} -> [{IPAddress, Family}]; + {error, Reason} -> host_lookup_error(Host, Reason) + end. + +host_lookup_error(Host, Reason) -> + error_logger:error_msg("invalid host ~p - ~p~n", [Host, Reason]), + throw({error, {invalid_host, Host, Reason}}). + +resolve_family({_,_,_,_}, auto) -> inet; +resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6; +resolve_family(IP, auto) -> throw({error, {strange_family, IP}}); +resolve_family(_, F) -> F. + +check_tcp_listener_address(NamePrefix, Port) when is_integer(Port) -> + check_tcp_listener_address_auto(NamePrefix, Port); + +check_tcp_listener_address(NamePrefix, {"auto", Port}) -> + %% Variant to prevent lots of hacking around in bash and batch files + check_tcp_listener_address_auto(NamePrefix, Port); + +check_tcp_listener_address(NamePrefix, {Host, Port}) -> + %% auto: determine family IPv4 / IPv6 after converting to IP address + check_tcp_listener_address(NamePrefix, {Host, Port, auto}); + +check_tcp_listener_address(NamePrefix, {Host, Port, Family0}) -> if is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) -> ok; true -> error_logger:error_msg("invalid port ~p - not 0..65535~n", [Port]), throw({error, {invalid_port, Port}}) end, - Name = rabbit_misc:tcp_name(NamePrefix, IPAddress, Port), - {IPAddress, Name}. + [{IPAddress, Port, Family, + rabbit_misc:tcp_name(NamePrefix, IPAddress, Port)} || + {IPAddress, Family} <- getaddr(Host, Family0)]. + +check_tcp_listener_address_auto(NamePrefix, Port) -> + lists:append([check_tcp_listener_address(NamePrefix, Listener) || + Listener <- port_to_listeners(Port)]). -start_tcp_listener(Host, Port) -> - start_listener(Host, Port, "TCP Listener", +start_tcp_listener(Listener) -> + start_listener(Listener, amqp, "TCP Listener", {?MODULE, start_client, []}). -start_ssl_listener(Host, Port, SslOpts) -> - start_listener(Host, Port, "SSL Listener", +start_ssl_listener(Listener, SslOpts) -> + start_listener(Listener, 'amqp/ssl', "SSL Listener", {?MODULE, start_ssl_client, [SslOpts]}). 
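The listener_config() forms accepted by check_tcp_listener_address/2 are easiest to see by example. The calls below are illustrative only; the exact address specs returned depend on the host's resolver and on whether the machine is single stack, dual stack or IPv4-only (see port_to_listeners/1 further down):

    %% bare port: expanded via port_to_listeners/1, possibly to two specs
    rabbit_networking:check_tcp_listener_address(rabbit_tcp_listener_sup, 5672),
    %% e.g. [{{0,0,0,0,0,0,0,0}, 5672, inet6, Name6},
    %%       {{0,0,0,0},         5672, inet,  Name4}]   on a dual-stack host

    %% {Host, Port}: family inferred ('auto'), here inet6 from the IPv6 literal
    rabbit_networking:check_tcp_listener_address(rabbit_tcp_listener_sup,
                                                 {"::1", 5672}),
    %% e.g. [{{0,0,0,0,0,0,0,1}, 5672, inet6, Name}]

    %% {Host, Port, Family}: force an IPv4 lookup of a hostname
    rabbit_networking:check_tcp_listener_address(rabbit_tcp_listener_sup,
                                                 {"localhost", 5672, inet}).
    %% e.g. [{{127,0,0,1}, 5672, inet, Name}]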
-start_listener(Host, Port, Label, OnConnect) -> - {IPAddress, Name} = - check_tcp_listener_address(rabbit_tcp_listener_sup, Host, Port), +start_listener(Listener, Protocol, Label, OnConnect) -> + [start_listener0(Spec, Protocol, Label, OnConnect) || + Spec <- check_tcp_listener_address(rabbit_tcp_listener_sup, Listener)], + ok. + +start_listener0({IPAddress, Port, Family, Name}, Protocol, Label, OnConnect) -> {ok,_} = supervisor:start_child( rabbit_sup, {Name, {tcp_listener_sup, start_link, - [IPAddress, Port, ?RABBIT_TCP_OPTS , - {?MODULE, tcp_listener_started, []}, - {?MODULE, tcp_listener_stopped, []}, + [IPAddress, Port, [Family | ?RABBIT_TCP_OPTS], + {?MODULE, tcp_listener_started, [Protocol]}, + {?MODULE, tcp_listener_stopped, [Protocol]}, OnConnect, Label]}, - transient, infinity, supervisor, [tcp_listener_sup]}), + transient, infinity, supervisor, [tcp_listener_sup]}). + +stop_tcp_listener(Listener) -> + [stop_tcp_listener0(Spec) || + Spec <- check_tcp_listener_address(rabbit_tcp_listener_sup, Listener)], ok. -stop_tcp_listener(Host, Port) -> - {ok, IPAddress} = inet:getaddr(Host, inet), +stop_tcp_listener0({IPAddress, Port, _Family, Name}) -> Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port), ok = supervisor:terminate_child(rabbit_sup, Name), - ok = supervisor:delete_child(rabbit_sup, Name), - ok. + ok = supervisor:delete_child(rabbit_sup, Name). -tcp_listener_started(IPAddress, Port) -> +tcp_listener_started(Protocol, IPAddress, Port) -> + %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1 + %% We need the host so we can distinguish multiple instances of the above + %% in a cluster. ok = mnesia:dirty_write( rabbit_listener, #listener{node = node(), - protocol = tcp, + protocol = Protocol, host = tcp_host(IPAddress), + ip_address = IPAddress, port = Port}). -tcp_listener_stopped(IPAddress, Port) -> +tcp_listener_stopped(Protocol, IPAddress, Port) -> ok = mnesia:dirty_delete_object( rabbit_listener, #listener{node = node(), - protocol = tcp, + protocol = Protocol, host = tcp_host(IPAddress), + ip_address = IPAddress, port = Port}). active_listeners() -> @@ -189,10 +247,10 @@ ok = mnesia:dirty_delete(rabbit_listener, Node). start_client(Sock, SockTransform) -> - {ok, Child} = supervisor:start_child(rabbit_tcp_client_sup, []), - ok = rabbit_net:controlling_process(Sock, Child), - Child ! {go, Sock, SockTransform}, - Child. + {ok, _Child, Reader} = supervisor:start_child(rabbit_tcp_client_sup, []), + ok = rabbit_net:controlling_process(Sock, Reader), + Reader ! {go, Sock, SockTransform}, + Reader. start_client(Sock) -> start_client(Sock, fun (S) -> {ok, S} end). @@ -215,8 +273,10 @@ end). connections() -> - [Pid || {_, Pid, _, _} <- supervisor:which_children( - rabbit_tcp_client_sup)]. + [rabbit_connection_sup:reader(ConnSup) || + Node <- rabbit_mnesia:running_clustered_nodes(), + {_, ConnSup, supervisor, _} + <- supervisor:which_children({rabbit_tcp_client_sup, Node})]. connection_info_keys() -> rabbit_reader:info_keys(). @@ -227,8 +287,7 @@ connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end). close_connection(Pid, Explanation) -> - case lists:any(fun ({_, ChildPid, _, _}) -> ChildPid =:= Pid end, - supervisor:which_children(rabbit_tcp_client_sup)) of + case lists:member(Pid, connections()) of true -> rabbit_reader:shutdown(Pid, Explanation); false -> throw({error, {not_a_connection_pid, Pid}}) end. 
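A hedged usage sketch of the connection-management API above, as it might be driven from an Erlang shell on any node of the cluster; the explanation string is arbitrary and purely illustrative:

    Keys  = rabbit_networking:connection_info_keys(),   % items rabbit_reader can report
    Conns = rabbit_networking:connections(),             % reader pids, cluster-wide
    case Conns of
        []        -> no_connections;
        [Pid | _] -> rabbit_networking:close_connection(Pid, "forced by operator")
    end.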
@@ -236,15 +295,102 @@ %%-------------------------------------------------------------------- tcp_host({0,0,0,0}) -> - {ok, Hostname} = inet:gethostname(), - case inet:gethostbyname(Hostname) of - {ok, #hostent{h_name = Name}} -> Name; - {error, _Reason} -> Hostname - end; + hostname(); + +tcp_host({0,0,0,0,0,0,0,0}) -> + hostname(); + tcp_host(IPAddress) -> case inet:gethostbyaddr(IPAddress) of {ok, #hostent{h_name = Name}} -> Name; - {error, _Reason} -> inet_parse:ntoa(IPAddress) + {error, _Reason} -> rabbit_misc:ntoa(IPAddress) + end. + +hostname() -> + {ok, Hostname} = inet:gethostname(), + case inet:gethostbyname(Hostname) of + {ok, #hostent{h_name = Name}} -> Name; + {error, _Reason} -> Hostname end. cmap(F) -> rabbit_misc:filter_exit_map(F, connections()). + +%%-------------------------------------------------------------------- + +%% There are three kinds of machine (for our purposes). +%% +%% * Those which treat IPv4 addresses as a special kind of IPv6 address +%% ("Single stack") +%% - Linux by default, Windows Vista and later +%% - We also treat any (hypothetical?) IPv6-only machine the same way +%% * Those which consider IPv6 and IPv4 to be completely separate things +%% ("Dual stack") +%% - OpenBSD, Windows XP / 2003, Linux if so configured +%% * Those which do not support IPv6. +%% - Ancient/weird OSes, Linux if so configured +%% +%% How to reconfigure Linux to test this: +%% Single stack (default): +%% echo 0 > /proc/sys/net/ipv6/bindv6only +%% Dual stack: +%% echo 1 > /proc/sys/net/ipv6/bindv6only +%% IPv4 only: +%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then +%% sudo update-grub && sudo reboot +%% +%% This matters in (and only in) the case where the sysadmin (or the +%% app descriptor) has only supplied a port and we wish to bind to +%% "all addresses". This means different things depending on whether +%% we're single or dual stack. On single stack binding to "::" +%% implicitly includes all IPv4 addresses, and subsequently attempting +%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will +%% only bind to IPv6 addresses, and we need another listener bound to +%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only +%% want to bind to "0.0.0.0". +%% +%% Unfortunately it seems there is no way to detect single vs dual stack +%% apart from attempting to bind to the port. +port_to_listeners(Port) -> + IPv4 = {"0.0.0.0", Port, inet}, + IPv6 = {"::", Port, inet6}, + case ipv6_status(?FIRST_TEST_BIND_PORT) of + single_stack -> [IPv6]; + ipv6_only -> [IPv6]; + dual_stack -> [IPv6, IPv4]; + ipv4_only -> [IPv4] + end. + +ipv6_status(TestPort) -> + IPv4 = [inet, {ip, {0,0,0,0}}], + IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}], + case gen_tcp:listen(TestPort, IPv6) of + {ok, LSock6} -> + case gen_tcp:listen(TestPort, IPv4) of + {ok, LSock4} -> + %% Dual stack + gen_tcp:close(LSock6), + gen_tcp:close(LSock4), + dual_stack; + %% Checking the error here would only let us + %% distinguish single stack IPv6 / IPv4 vs IPv6 only, + %% which we figure out below anyway. + {error, _} -> + gen_tcp:close(LSock6), + case gen_tcp:listen(TestPort, IPv4) of + %% Single stack + {ok, LSock4} -> gen_tcp:close(LSock4), + single_stack; + %% IPv6-only machine. Welcome to the future. + {error, eafnosupport} -> ipv6_only; + %% Dual stack machine with something already + %% on IPv4. + {error, _} -> ipv6_status(TestPort + 1) + end + end; + {error, eafnosupport} -> + %% IPv4-only machine. Welcome to the 90s. 
+ ipv4_only; + {error, _} -> + %% Port in use + ipv6_status(TestPort + 1) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_node_monitor.erl rabbitmq-server-2.3.1/src/rabbit_node_monitor.erl --- rabbitmq-server-1.7.2/src/rabbit_node_monitor.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_node_monitor.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_node_monitor). diff -Nru rabbitmq-server-1.7.2/src/rabbit_persister.erl rabbitmq-server-2.3.1/src/rabbit_persister.erl --- rabbitmq-server-1.7.2/src/rabbit_persister.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_persister.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,523 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_persister). - --behaviour(gen_server). - --export([start_link/0]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --export([transaction/1, extend_transaction/2, dirty_work/1, - commit_transaction/1, rollback_transaction/1, - force_snapshot/0, serial/0]). - --include("rabbit.hrl"). - --define(SERVER, ?MODULE). - --define(LOG_BUNDLE_DELAY, 5). --define(COMPLETE_BUNDLE_DELAY, 2). - --define(HIBERNATE_AFTER, 10000). - --define(MAX_WRAP_ENTRIES, 500). - --define(PERSISTER_LOG_FORMAT_VERSION, {2, 4}). - --record(pstate, {log_handle, entry_count, deadline, - pending_logs, pending_replies, - snapshot}). - -%% two tables for efficient persistency -%% one maps a key to a message -%% the other maps a key to one or more queues. -%% The aim is to reduce the overload of storing a message multiple times -%% when it appears in several queues. --record(psnapshot, {serial, transactions, messages, queues}). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --type(qmsg() :: {amqqueue(), pkey()}). --type(work_item() :: - {publish, message(), qmsg()} | - {deliver, qmsg()} | - {ack, qmsg()}). - --spec(start_link/0 :: () -> {'ok', pid()} | 'ignore' | {'error', any()}). --spec(transaction/1 :: ([work_item()]) -> 'ok'). --spec(extend_transaction/2 :: (txn(), [work_item()]) -> 'ok'). --spec(dirty_work/1 :: ([work_item()]) -> 'ok'). --spec(commit_transaction/1 :: (txn()) -> 'ok'). --spec(rollback_transaction/1 :: (txn()) -> 'ok'). --spec(force_snapshot/0 :: () -> 'ok'). --spec(serial/0 :: () -> non_neg_integer()). - --endif. - -%%---------------------------------------------------------------------------- - -start_link() -> - gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). - -transaction(MessageList) -> - ?LOGDEBUG("transaction ~p~n", [MessageList]), - TxnKey = rabbit_guid:guid(), - gen_server:call(?SERVER, {transaction, TxnKey, MessageList}, infinity). - -extend_transaction(TxnKey, MessageList) -> - ?LOGDEBUG("extend_transaction ~p ~p~n", [TxnKey, MessageList]), - gen_server:cast(?SERVER, {extend_transaction, TxnKey, MessageList}). - -dirty_work(MessageList) -> - ?LOGDEBUG("dirty_work ~p~n", [MessageList]), - gen_server:cast(?SERVER, {dirty_work, MessageList}). - -commit_transaction(TxnKey) -> - ?LOGDEBUG("commit_transaction ~p~n", [TxnKey]), - gen_server:call(?SERVER, {commit_transaction, TxnKey}, infinity). - -rollback_transaction(TxnKey) -> - ?LOGDEBUG("rollback_transaction ~p~n", [TxnKey]), - gen_server:cast(?SERVER, {rollback_transaction, TxnKey}). - -force_snapshot() -> - gen_server:call(?SERVER, force_snapshot, infinity). - -serial() -> - gen_server:call(?SERVER, serial, infinity). 
- -%%-------------------------------------------------------------------- - -init(_Args) -> - process_flag(trap_exit, true), - FileName = base_filename(), - ok = filelib:ensure_dir(FileName), - Snapshot = #psnapshot{serial = 0, - transactions = dict:new(), - messages = ets:new(messages, []), - queues = ets:new(queues, [])}, - LogHandle = - case disk_log:open([{name, rabbit_persister}, - {head, current_snapshot(Snapshot)}, - {file, FileName}]) of - {ok, LH} -> LH; - {repaired, LH, {recovered, Recovered}, {badbytes, Bad}} -> - WarningFun = if - Bad > 0 -> fun rabbit_log:warning/2; - true -> fun rabbit_log:info/2 - end, - WarningFun("Repaired persister log - ~p recovered, ~p bad~n", - [Recovered, Bad]), - LH - end, - {Res, LoadedSnapshot} = internal_load_snapshot(LogHandle, Snapshot), - NewSnapshot = LoadedSnapshot#psnapshot{ - serial = LoadedSnapshot#psnapshot.serial + 1}, - case Res of - ok -> - ok = take_snapshot(LogHandle, NewSnapshot); - {error, Reason} -> - rabbit_log:error("Failed to load persister log: ~p~n", [Reason]), - ok = take_snapshot_and_save_old(LogHandle, NewSnapshot) - end, - State = #pstate{log_handle = LogHandle, - entry_count = 0, - deadline = infinity, - pending_logs = [], - pending_replies = [], - snapshot = NewSnapshot}, - {ok, State}. - -handle_call({transaction, Key, MessageList}, From, State) -> - NewState = internal_extend(Key, MessageList, State), - do_noreply(internal_commit(From, Key, NewState)); -handle_call({commit_transaction, TxnKey}, From, State) -> - do_noreply(internal_commit(From, TxnKey, State)); -handle_call(force_snapshot, _From, State) -> - do_reply(ok, flush(true, State)); -handle_call(serial, _From, - State = #pstate{snapshot = #psnapshot{serial = Serial}}) -> - do_reply(Serial, State); -handle_call(_Request, _From, State) -> - {noreply, State}. - -handle_cast({rollback_transaction, TxnKey}, State) -> - do_noreply(internal_rollback(TxnKey, State)); -handle_cast({dirty_work, MessageList}, State) -> - do_noreply(internal_dirty_work(MessageList, State)); -handle_cast({extend_transaction, TxnKey, MessageList}, State) -> - do_noreply(internal_extend(TxnKey, MessageList, State)); -handle_cast(_Msg, State) -> - {noreply, State}. - -handle_info(timeout, State = #pstate{deadline = infinity}) -> - State1 = flush(true, State), - %% TODO: Once we drop support for R11B-5, we can change this to - %% {noreply, State1, hibernate}; - proc_lib:hibernate(gen_server2, enter_loop, [?MODULE, [], State1]); -handle_info(timeout, State) -> - do_noreply(flush(State)); -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, State = #pstate{log_handle = LogHandle}) -> - flush(State), - disk_log:close(LogHandle), - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, flush(State)}. - -%%-------------------------------------------------------------------- - -internal_extend(Key, MessageList, State) -> - log_work(fun (ML) -> {extend_transaction, Key, ML} end, - MessageList, State). - -internal_dirty_work(MessageList, State) -> - log_work(fun (ML) -> {dirty_work, ML} end, - MessageList, State). - -internal_commit(From, Key, State = #pstate{snapshot = Snapshot}) -> - Unit = {commit_transaction, Key}, - NewSnapshot = internal_integrate1(Unit, Snapshot), - complete(From, Unit, State#pstate{snapshot = NewSnapshot}). - -internal_rollback(Key, State = #pstate{snapshot = Snapshot}) -> - Unit = {rollback_transaction, Key}, - NewSnapshot = internal_integrate1(Unit, Snapshot), - log(State#pstate{snapshot = NewSnapshot}, Unit). 
- -complete(From, Item, State = #pstate{deadline = ExistingDeadline, - pending_logs = Logs, - pending_replies = Waiting}) -> - State#pstate{deadline = compute_deadline( - ?COMPLETE_BUNDLE_DELAY, ExistingDeadline), - pending_logs = [Item | Logs], - pending_replies = [From | Waiting]}. - -%% This is made to limit disk usage by writing messages only once onto -%% disk. We keep a table associating pkeys to messages, and provided -%% the list of messages to output is left to right, we can guarantee -%% that pkeys will be a backreference to a message in memory when a -%% "tied" is met. -log_work(CreateWorkUnit, MessageList, - State = #pstate{ - snapshot = Snapshot = #psnapshot{ - messages = Messages}}) -> - Unit = CreateWorkUnit( - rabbit_misc:map_in_order( - fun(M = {publish, Message, QK = {_QName, PKey}}) -> - case ets:lookup(Messages, PKey) of - [_] -> {tied, QK}; - [] -> ets:insert(Messages, {PKey, Message}), - M - end; - (M) -> M - end, - MessageList)), - NewSnapshot = internal_integrate1(Unit, Snapshot), - log(State#pstate{snapshot = NewSnapshot}, Unit). - -log(State = #pstate{deadline = ExistingDeadline, pending_logs = Logs}, - Message) -> - State#pstate{deadline = compute_deadline(?LOG_BUNDLE_DELAY, - ExistingDeadline), - pending_logs = [Message | Logs]}. - -base_filename() -> - rabbit_mnesia:dir() ++ "/rabbit_persister.LOG". - -take_snapshot(LogHandle, OldFileName, Snapshot) -> - ok = disk_log:sync(LogHandle), - %% current_snapshot is the Head (ie. first thing logged) - ok = disk_log:reopen(LogHandle, OldFileName, current_snapshot(Snapshot)). - -take_snapshot(LogHandle, Snapshot) -> - OldFileName = lists:flatten(base_filename() ++ ".previous"), - file:delete(OldFileName), - rabbit_log:info("Rolling persister log to ~p~n", [OldFileName]), - ok = take_snapshot(LogHandle, OldFileName, Snapshot). - -take_snapshot_and_save_old(LogHandle, Snapshot) -> - {MegaSecs, Secs, MicroSecs} = erlang:now(), - Timestamp = MegaSecs * 1000000 + Secs * 1000 + MicroSecs, - OldFileName = lists:flatten(io_lib:format("~s.saved.~p", - [base_filename(), Timestamp])), - rabbit_log:info("Saving persister log in ~p~n", [OldFileName]), - ok = take_snapshot(LogHandle, OldFileName, Snapshot). - -maybe_take_snapshot(Force, State = #pstate{entry_count = EntryCount, - log_handle = LH, - snapshot = Snapshot}) - when Force orelse EntryCount >= ?MAX_WRAP_ENTRIES -> - ok = take_snapshot(LH, Snapshot), - State#pstate{entry_count = 0}; -maybe_take_snapshot(_Force, State) -> - State. - -later_ms(DeltaMilliSec) -> - {MegaSec, Sec, MicroSec} = now(), - %% Note: not normalised. Unimportant for this application. - {MegaSec, Sec, MicroSec + (DeltaMilliSec * 1000)}. - -%% Result = B - A, more or less -time_diff({B1, B2, B3}, {A1, A2, A3}) -> - (B1 - A1) * 1000000 + (B2 - A2) + (B3 - A3) / 1000000.0 . - -compute_deadline(TimerDelay, infinity) -> - later_ms(TimerDelay); -compute_deadline(_TimerDelay, ExistingDeadline) -> - ExistingDeadline. - -compute_timeout(infinity) -> - ?HIBERNATE_AFTER; -compute_timeout(Deadline) -> - DeltaMilliSec = time_diff(Deadline, now()) * 1000.0, - if - DeltaMilliSec =< 1 -> - 0; - true -> - round(DeltaMilliSec) - end. - -do_noreply(State = #pstate{deadline = Deadline}) -> - {noreply, State, compute_timeout(Deadline)}. - -do_reply(Reply, State = #pstate{deadline = Deadline}) -> - {reply, Reply, State, compute_timeout(Deadline)}. - -flush(State) -> flush(false, State). 
- -flush(ForceSnapshot, State = #pstate{pending_logs = PendingLogs, - pending_replies = Waiting, - log_handle = LogHandle}) -> - State1 = if PendingLogs /= [] -> - disk_log:alog(LogHandle, lists:reverse(PendingLogs)), - State#pstate{entry_count = State#pstate.entry_count + 1}; - true -> - State - end, - State2 = maybe_take_snapshot(ForceSnapshot, State1), - if Waiting /= [] -> - ok = disk_log:sync(LogHandle), - lists:foreach(fun (From) -> gen_server:reply(From, ok) end, - Waiting); - true -> - ok - end, - State2#pstate{deadline = infinity, - pending_logs = [], - pending_replies = []}. - -current_snapshot(_Snapshot = #psnapshot{serial = Serial, - transactions= Ts, - messages = Messages, - queues = Queues}) -> - %% Avoid infinite growth of the table by removing messages not - %% bound to a queue anymore - prune_table(Messages, ets:foldl( - fun ({{_QName, PKey}, _Delivered}, S) -> - sets:add_element(PKey, S) - end, sets:new(), Queues)), - InnerSnapshot = {{serial, Serial}, - {txns, Ts}, - {messages, ets:tab2list(Messages)}, - {queues, ets:tab2list(Queues)}}, - ?LOGDEBUG("Inner snapshot: ~p~n", [InnerSnapshot]), - {persist_snapshot, {vsn, ?PERSISTER_LOG_FORMAT_VERSION}, - term_to_binary(InnerSnapshot)}. - -prune_table(Tab, Keys) -> - true = ets:safe_fixtable(Tab, true), - ok = prune_table(Tab, Keys, ets:first(Tab)), - true = ets:safe_fixtable(Tab, false). - -prune_table(_Tab, _Keys, '$end_of_table') -> ok; -prune_table(Tab, Keys, Key) -> - case sets:is_element(Key, Keys) of - true -> ok; - false -> ets:delete(Tab, Key) - end, - prune_table(Tab, Keys, ets:next(Tab, Key)). - -internal_load_snapshot(LogHandle, - Snapshot = #psnapshot{messages = Messages, - queues = Queues}) -> - {K, [Loaded_Snapshot | Items]} = disk_log:chunk(LogHandle, start), - case check_version(Loaded_Snapshot) of - {ok, StateBin} -> - {{serial, Serial}, {txns, Ts}, {messages, Ms}, {queues, Qs}} = - binary_to_term(StateBin), - true = ets:insert(Messages, Ms), - true = ets:insert(Queues, Qs), - Snapshot1 = replay(Items, LogHandle, K, - Snapshot#psnapshot{ - serial = Serial, - transactions = Ts}), - Snapshot2 = requeue_messages(Snapshot1), - %% uncompleted transactions are discarded - this is TRTTD - %% since we only get into this code on node restart, so - %% any uncompleted transactions will have been aborted. - {ok, Snapshot2#psnapshot{transactions = dict:new()}}; - {error, Reason} -> {{error, Reason}, Snapshot} - end. - -check_version({persist_snapshot, {vsn, ?PERSISTER_LOG_FORMAT_VERSION}, - StateBin}) -> - {ok, StateBin}; -check_version({persist_snapshot, {vsn, Vsn}, _StateBin}) -> - {error, {unsupported_persister_log_format, Vsn}}; -check_version(_Other) -> - {error, unrecognised_persister_log_format}. - -requeue_messages(Snapshot = #psnapshot{messages = Messages, - queues = Queues}) -> - Work = ets:foldl(fun accumulate_requeues/2, dict:new(), Queues), - %% unstable parallel map, because order doesn't matter - L = lists:append( - rabbit_misc:upmap( - %% we do as much work as possible in spawned worker - %% processes, but we need to make sure the ets:inserts are - %% performed in self() - fun ({QName, Requeues}) -> - requeue(QName, Requeues, Messages) - end, dict:to_list(Work))), - NewMessages = [{K, M} || {{_Q, K}, M, _D} <- L], - NewQueues = [{QK, D} || {QK, _M, D} <- L], - ets:delete_all_objects(Messages), - ets:delete_all_objects(Queues), - true = ets:insert(Messages, NewMessages), - true = ets:insert(Queues, NewQueues), - %% contains the mutated messages and queues tables - Snapshot. 
- -accumulate_requeues({{QName, PKey}, Delivered}, Acc) -> - Requeue = {PKey, Delivered}, - dict:update(QName, - fun (Requeues) -> [Requeue | Requeues] end, - [Requeue], - Acc). - -requeue(QName, Requeues, Messages) -> - case rabbit_amqqueue:lookup(QName) of - {ok, #amqqueue{pid = QPid}} -> - RequeueMessages = - [{{QName, PKey}, Message, Delivered} || - {PKey, Delivered} <- Requeues, - {_, Message} <- ets:lookup(Messages, PKey)], - rabbit_amqqueue:redeliver( - QPid, - %% Messages published by the same process receive - %% persistence keys that are monotonically - %% increasing. Since message ordering is defined on a - %% per-channel basis, and channels are bound to specific - %% processes, sorting the list does provide the correct - %% ordering properties. - [{Message, Delivered} || {_, Message, Delivered} <- - lists:sort(RequeueMessages)]), - RequeueMessages; - {error, not_found} -> - [] - end. - -replay([], LogHandle, K, Snapshot) -> - case disk_log:chunk(LogHandle, K) of - {K1, Items} -> - replay(Items, LogHandle, K1, Snapshot); - {K1, Items, Badbytes} -> - rabbit_log:warning("~p bad bytes recovering persister log~n", - [Badbytes]), - replay(Items, LogHandle, K1, Snapshot); - eof -> Snapshot - end; -replay([Item | Items], LogHandle, K, Snapshot) -> - NewSnapshot = internal_integrate_messages(Item, Snapshot), - replay(Items, LogHandle, K, NewSnapshot). - -internal_integrate_messages(Items, Snapshot) -> - lists:foldl(fun (Item, Snap) -> internal_integrate1(Item, Snap) end, - Snapshot, Items). - -internal_integrate1({extend_transaction, Key, MessageList}, - Snapshot = #psnapshot {transactions = Transactions}) -> - NewTransactions = - dict:update(Key, - fun (MessageLists) -> [MessageList | MessageLists] end, - [MessageList], - Transactions), - Snapshot#psnapshot{transactions = NewTransactions}; -internal_integrate1({rollback_transaction, Key}, - Snapshot = #psnapshot{transactions = Transactions}) -> - Snapshot#psnapshot{transactions = dict:erase(Key, Transactions)}; -internal_integrate1({commit_transaction, Key}, - Snapshot = #psnapshot{transactions = Transactions, - messages = Messages, - queues = Queues}) -> - case dict:find(Key, Transactions) of - {ok, MessageLists} -> - ?LOGDEBUG("persist committing txn ~p~n", [Key]), - lists:foreach(fun (ML) -> perform_work(ML, Messages, Queues) end, - lists:reverse(MessageLists)), - Snapshot#psnapshot{transactions = dict:erase(Key, Transactions)}; - error -> - Snapshot - end; -internal_integrate1({dirty_work, MessageList}, - Snapshot = #psnapshot {messages = Messages, - queues = Queues}) -> - perform_work(MessageList, Messages, Queues), - Snapshot. - -perform_work(MessageList, Messages, Queues) -> - lists:foreach( - fun (Item) -> perform_work_item(Item, Messages, Queues) end, - MessageList). - -perform_work_item({publish, Message, QK = {_QName, PKey}}, Messages, Queues) -> - ets:insert(Messages, {PKey, Message}), - ets:insert(Queues, {QK, false}); - -perform_work_item({tied, QK}, _Messages, Queues) -> - ets:insert(Queues, {QK, false}); - -perform_work_item({deliver, QK}, _Messages, Queues) -> - %% from R12B-2 onward we could use ets:update_element/3 here - ets:delete(Queues, QK), - ets:insert(Queues, {QK, true}); - -perform_work_item({ack, QK}, _Messages, Queues) -> - ets:delete(Queues, QK). 
diff -Nru rabbitmq-server-1.7.2/src/rabbit_plugin_activator.erl rabbitmq-server-2.3.1/src/rabbit_plugin_activator.erl --- rabbitmq-server-1.7.2/src/rabbit_plugin_activator.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_plugin_activator.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,254 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_plugin_activator). - --export([start/0, stop/0]). - --define(DefaultPluginDir, "plugins"). --define(DefaultUnpackedPluginDir, "priv/plugins"). --define(DefaultRabbitEBin, "ebin"). --define(BaseApps, [rabbit]). - -%%---------------------------------------------------------------------------- -%% Specs -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --spec(start/0 :: () -> no_return()). --spec(stop/0 :: () -> 'ok'). - --endif. 
- -%%---------------------------------------------------------------------------- - -start() -> - %% Ensure Rabbit is loaded so we can access it's environment - application:load(rabbit), - - %% Determine our various directories - PluginDir = get_env(plugins_dir, ?DefaultPluginDir), - UnpackedPluginDir = get_env(plugins_expand_dir, ?DefaultUnpackedPluginDir), - RabbitEBin = get_env(rabbit_ebin, ?DefaultRabbitEBin), - - RootName = RabbitEBin ++ "/rabbit", - - %% Unpack any .ez plugins - unpack_ez_plugins(PluginDir, UnpackedPluginDir), - - %% Build a list of required apps based on the fixed set, and any plugins - RequiredApps = ?BaseApps ++ - find_plugins(PluginDir) ++ - find_plugins(UnpackedPluginDir), - - %% Build the entire set of dependencies - this will load the - %% applications along the way - AllApps = case catch sets:to_list(expand_dependencies(RequiredApps)) of - {failed_to_load_app, App, Err} -> - error("failed to load application ~s:~n~p", [App, Err]); - AppList -> - AppList - end, - AppVersions = [determine_version(App) || App <- AllApps], - {rabbit, RabbitVersion} = proplists:lookup(rabbit, AppVersions), - - %% Build the overall release descriptor - RDesc = {release, - {"rabbit", RabbitVersion}, - {erts, erlang:system_info(version)}, - AppVersions}, - - %% Write it out to ebin/rabbit.rel - file:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), - - %% Compile the script - ScriptFile = RootName ++ ".script", - case systools:make_script(RootName, [local, silent]) of - {ok, Module, Warnings} -> - %% This gets lots of spurious no-source warnings when we - %% have .ez files, so we want to supress them to prevent - %% hiding real issues. On Ubuntu, we also get warnings - %% about kernel/stdlib sources being out of date, which we - %% also ignore for the same reason. - WarningStr = Module:format_warning( - [W || W <- Warnings, - case W of - {warning, {source_not_found, _}} -> false; - {warning, {obj_out_of_date, {_,_,WApp,_,_}}} - when WApp == mnesia; - WApp == stdlib; - WApp == kernel; - WApp == sasl; - WApp == os_mon -> false; - _ -> true - end]), - case length(WarningStr) of - 0 -> ok; - _ -> io:format("~s", [WarningStr]) - end, - ok; - {error, Module, Error} -> - error("generation of boot script file ~s failed:~n~s", - [ScriptFile, Module:format_error(Error)]) - end, - - case post_process_script(ScriptFile) of - ok -> ok; - {error, Reason} -> - error("post processing of boot script file ~s failed:~n~w", - [ScriptFile, Reason]) - end, - case systools:script2boot(RootName) of - ok -> ok; - error -> error("failed to compile boot script file ~s", [ScriptFile]) - end, - halt(), - ok. - -stop() -> - ok. - -get_env(Key, Default) -> - case application:get_env(rabbit, Key) of - {ok, V} -> V; - _ -> Default - end. - -determine_version(App) -> - application:load(App), - {ok, Vsn} = application:get_key(App, vsn), - {App, Vsn}. - -assert_dir(Dir) -> - case filelib:is_dir(Dir) of - true -> ok; - false -> ok = filelib:ensure_dir(Dir), - ok = file:make_dir(Dir) - end. - -delete_dir(Dir) -> - case filelib:is_dir(Dir) of - true -> - case file:list_dir(Dir) of - {ok, Files} -> - [case Dir ++ "/" ++ F of - Fn -> - case filelib:is_dir(Fn) and not(is_symlink(Fn)) of - true -> delete_dir(Fn); - false -> file:delete(Fn) - end - end || F <- Files] - end, - ok = file:del_dir(Dir); - false -> - ok - end. - -is_symlink(Name) -> - case file:read_link(Name) of - {ok, _} -> true; - _ -> false - end. 
- -unpack_ez_plugins(PluginSrcDir, PluginDestDir) -> - %% Eliminate the contents of the destination directory - delete_dir(PluginDestDir), - - assert_dir(PluginDestDir), - [unpack_ez_plugin(PluginName, PluginDestDir) || - PluginName <- filelib:wildcard(PluginSrcDir ++ "/*.ez")]. - -unpack_ez_plugin(PluginFn, PluginDestDir) -> - zip:unzip(PluginFn, [{cwd, PluginDestDir}]), - ok. - -find_plugins(PluginDir) -> - [prepare_dir_plugin(PluginName) || - PluginName <- filelib:wildcard(PluginDir ++ "/*/ebin/*.app")]. - -prepare_dir_plugin(PluginAppDescFn) -> - %% Add the plugin ebin directory to the load path - PluginEBinDirN = filename:dirname(PluginAppDescFn), - code:add_path(PluginEBinDirN), - - %% We want the second-last token - NameTokens = string:tokens(PluginAppDescFn,"/."), - PluginNameString = lists:nth(length(NameTokens) - 1, NameTokens), - list_to_atom(PluginNameString). - -expand_dependencies(Pending) -> - expand_dependencies(sets:new(), Pending). -expand_dependencies(Current, []) -> - Current; -expand_dependencies(Current, [Next|Rest]) -> - case sets:is_element(Next, Current) of - true -> - expand_dependencies(Current, Rest); - false -> - case application:load(Next) of - ok -> - ok; - {error, {already_loaded, _}} -> - ok; - {error, Reason} -> - throw({failed_to_load_app, Next, Reason}) - end, - {ok, Required} = application:get_key(Next, applications), - Unique = [A || A <- Required, not(sets:is_element(A, Current))], - expand_dependencies(sets:add_element(Next, Current), Rest ++ Unique) - end. - -post_process_script(ScriptFile) -> - case file:consult(ScriptFile) of - {ok, [{script, Name, Entries}]} -> - NewEntries = lists:flatmap(fun process_entry/1, Entries), - case file:open(ScriptFile, [write]) of - {ok, Fd} -> - io:format(Fd, "%% script generated at ~w ~w~n~p.~n", - [date(), time(), {script, Name, NewEntries}]), - file:close(Fd), - ok; - {error, OReason} -> - {error, {failed_to_open_script_file_for_writing, OReason}} - end; - {error, Reason} -> - {error, {failed_to_load_script, Reason}} - end. - -process_entry(Entry = {apply,{application,start_boot,[stdlib,permanent]}}) -> - [Entry, {apply,{rabbit,prepare,[]}}]; -process_entry(Entry) -> - [Entry]. - -error(Fmt, Args) -> - io:format("ERROR: " ++ Fmt ++ "~n", Args), - halt(1). diff -Nru rabbitmq-server-1.7.2/src/rabbit_prelaunch.erl rabbitmq-server-2.3.1/src/rabbit_prelaunch.erl --- rabbitmq-server-1.7.2/src/rabbit_prelaunch.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_prelaunch.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,276 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_prelaunch). + +-export([start/0, stop/0]). + +-define(BaseApps, [rabbit]). +-define(ERROR_CODE, 1). + +%%---------------------------------------------------------------------------- +%% Specs +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). 
+ +-spec(start/0 :: () -> no_return()). +-spec(stop/0 :: () -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +start() -> + io:format("Activating RabbitMQ plugins ...~n"), + + %% Determine our various directories + [PluginDir, UnpackedPluginDir, NodeStr] = init:get_plain_arguments(), + RootName = UnpackedPluginDir ++ "/rabbit", + + %% Unpack any .ez plugins + unpack_ez_plugins(PluginDir, UnpackedPluginDir), + + %% Build a list of required apps based on the fixed set, and any plugins + PluginApps = find_plugins(PluginDir) ++ find_plugins(UnpackedPluginDir), + RequiredApps = ?BaseApps ++ PluginApps, + + %% Build the entire set of dependencies - this will load the + %% applications along the way + AllApps = case catch sets:to_list(expand_dependencies(RequiredApps)) of + {failed_to_load_app, App, Err} -> + terminate("failed to load application ~s:~n~p", + [App, Err]); + AppList -> + AppList + end, + AppVersions = [determine_version(App) || App <- AllApps], + RabbitVersion = proplists:get_value(rabbit, AppVersions), + + %% Build the overall release descriptor + RDesc = {release, + {"rabbit", RabbitVersion}, + {erts, erlang:system_info(version)}, + AppVersions}, + + %% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel + file:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), + + %% Compile the script + ScriptFile = RootName ++ ".script", + case systools:make_script(RootName, [local, silent, exref]) of + {ok, Module, Warnings} -> + %% This gets lots of spurious no-source warnings when we + %% have .ez files, so we want to supress them to prevent + %% hiding real issues. On Ubuntu, we also get warnings + %% about kernel/stdlib sources being out of date, which we + %% also ignore for the same reason. + WarningStr = Module:format_warning( + [W || W <- Warnings, + case W of + {warning, {source_not_found, _}} -> false; + {warning, {obj_out_of_date, {_,_,WApp,_,_}}} + when WApp == mnesia; + WApp == stdlib; + WApp == kernel; + WApp == sasl; + WApp == crypto; + WApp == os_mon -> false; + _ -> true + end]), + case length(WarningStr) of + 0 -> ok; + _ -> io:format("~s", [WarningStr]) + end, + ok; + {error, Module, Error} -> + terminate("generation of boot script file ~s failed:~n~s", + [ScriptFile, Module:format_error(Error)]) + end, + + case post_process_script(ScriptFile) of + ok -> ok; + {error, Reason} -> + terminate("post processing of boot script file ~s failed:~n~w", + [ScriptFile, Reason]) + end, + case systools:script2boot(RootName) of + ok -> ok; + error -> terminate("failed to compile boot script file ~s", + [ScriptFile]) + end, + io:format("~w plugins activated:~n", [length(PluginApps)]), + [io:format("* ~s-~s~n", [App, proplists:get_value(App, AppVersions)]) + || App <- PluginApps], + io:nl(), + + ok = duplicate_node_check(NodeStr), + + terminate(0), + ok. + +stop() -> + ok. + +determine_version(App) -> + application:load(App), + {ok, Vsn} = application:get_key(App, vsn), + {App, Vsn}. 
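For orientation, the release descriptor RDesc written to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel by the code above has the usual OTP .rel shape. The sketch below uses placeholder version numbers; they are not taken from any real release, and the application list depends on which plugins were found:

    {release,
     {"rabbit", "2.3.1"},
     {erts, "5.8.2"},                 %% erlang:system_info(version) of the running VM
     [{kernel, "2.14.2"},
      {stdlib, "1.17.2"},
      {sasl,   "2.1.9"},
      {os_mon, "2.2.5"},
      {mnesia, "4.4.17"},
      {rabbit, "2.3.1"}]}.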
+ +delete_recursively(Fn) -> + case filelib:is_dir(Fn) and not(is_symlink(Fn)) of + true -> + case file:list_dir(Fn) of + {ok, Files} -> + case lists:foldl(fun ( Fn1, ok) -> delete_recursively( + Fn ++ "/" ++ Fn1); + (_Fn1, Err) -> Err + end, ok, Files) of + ok -> case file:del_dir(Fn) of + ok -> ok; + {error, E} -> {error, + {cannot_delete, Fn, E}} + end; + Err -> Err + end; + {error, E} -> + {error, {cannot_list_files, Fn, E}} + end; + false -> + case filelib:is_file(Fn) of + true -> case file:delete(Fn) of + ok -> ok; + {error, E} -> {error, {cannot_delete, Fn, E}} + end; + false -> ok + end + end. + +is_symlink(Name) -> + case file:read_link(Name) of + {ok, _} -> true; + _ -> false + end. + +unpack_ez_plugins(SrcDir, DestDir) -> + %% Eliminate the contents of the destination directory + case delete_recursively(DestDir) of + ok -> ok; + {error, E} -> terminate("Could not delete dir ~s (~p)", [DestDir, E]) + end, + case filelib:ensure_dir(DestDir ++ "/") of + ok -> ok; + {error, E2} -> terminate("Could not create dir ~s (~p)", [DestDir, E2]) + end, + [unpack_ez_plugin(PluginName, DestDir) || + PluginName <- filelib:wildcard(SrcDir ++ "/*.ez")]. + +unpack_ez_plugin(PluginFn, PluginDestDir) -> + zip:unzip(PluginFn, [{cwd, PluginDestDir}]), + ok. + +find_plugins(PluginDir) -> + [prepare_dir_plugin(PluginName) || + PluginName <- filelib:wildcard(PluginDir ++ "/*/ebin/*.app")]. + +prepare_dir_plugin(PluginAppDescFn) -> + %% Add the plugin ebin directory to the load path + PluginEBinDirN = filename:dirname(PluginAppDescFn), + code:add_path(PluginEBinDirN), + + %% We want the second-last token + NameTokens = string:tokens(PluginAppDescFn,"/."), + PluginNameString = lists:nth(length(NameTokens) - 1, NameTokens), + list_to_atom(PluginNameString). + +expand_dependencies(Pending) -> + expand_dependencies(sets:new(), Pending). +expand_dependencies(Current, []) -> + Current; +expand_dependencies(Current, [Next|Rest]) -> + case sets:is_element(Next, Current) of + true -> + expand_dependencies(Current, Rest); + false -> + case application:load(Next) of + ok -> + ok; + {error, {already_loaded, _}} -> + ok; + {error, Reason} -> + throw({failed_to_load_app, Next, Reason}) + end, + {ok, Required} = application:get_key(Next, applications), + Unique = [A || A <- Required, not(sets:is_element(A, Current))], + expand_dependencies(sets:add_element(Next, Current), Rest ++ Unique) + end. + +post_process_script(ScriptFile) -> + case file:consult(ScriptFile) of + {ok, [{script, Name, Entries}]} -> + NewEntries = lists:flatmap(fun process_entry/1, Entries), + case file:open(ScriptFile, [write]) of + {ok, Fd} -> + io:format(Fd, "%% script generated at ~w ~w~n~p.~n", + [date(), time(), {script, Name, NewEntries}]), + file:close(Fd), + ok; + {error, OReason} -> + {error, {failed_to_open_script_file_for_writing, OReason}} + end; + {error, Reason} -> + {error, {failed_to_load_script, Reason}} + end. + +process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> + [{apply,{rabbit,prepare,[]}}, Entry]; +process_entry(Entry) -> + [Entry]. 
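A worked example of the "second-last token" extraction in prepare_dir_plugin/1, using a hypothetical plugin path (shell transcript for illustration):

    1> string:tokens("plugins/rabbitmq_stomp/ebin/rabbitmq_stomp.app", "/.").
    ["plugins","rabbitmq_stomp","ebin","rabbitmq_stomp","app"]
    2> lists:nth(5 - 1, ["plugins","rabbitmq_stomp","ebin","rabbitmq_stomp","app"]).
    "rabbitmq_stomp"
    3> list_to_atom("rabbitmq_stomp").
    rabbitmq_stomp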
+ +%% Check whether a node with the same name is already running +duplicate_node_check([]) -> + %% Ignore running node while installing windows service + ok; +duplicate_node_check(NodeStr) -> + Node = rabbit_misc:makenode(NodeStr), + {NodeName, NodeHost} = rabbit_misc:nodeparts(Node), + case net_adm:names(NodeHost) of + {ok, NamePorts} -> + case proplists:is_defined(NodeName, NamePorts) of + true -> io:format("node with name ~p " + "already running on ~p~n", + [NodeName, NodeHost]), + [io:format(Fmt ++ "~n", Args) || + {Fmt, Args} <- rabbit_control:diagnostics(Node)], + terminate(?ERROR_CODE); + false -> ok + end; + {error, EpmdReason} -> terminate("unexpected epmd error: ~p~n", + [EpmdReason]) + end. + +terminate(Fmt, Args) -> + io:format("ERROR: " ++ Fmt ++ "~n", Args), + terminate(?ERROR_CODE). + +terminate(Status) -> + case os:type() of + {unix, _} -> halt(Status); + {win32, _} -> init:stop(Status), + receive + after infinity -> ok + end + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_queue_collector.erl rabbitmq-server-2.3.1/src/rabbit_queue_collector.erl --- rabbitmq-server-1.7.2/src/rabbit_queue_collector.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_queue_collector.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,92 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_queue_collector). + +-behaviour(gen_server). + +-export([start_link/0, register/2, delete_all/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-record(state, {queues, delete_from}). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). +-spec(register/2 :: (pid(), rabbit_types:amqqueue()) -> 'ok'). +-spec(delete_all/1 :: (pid()) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +start_link() -> + gen_server:start_link(?MODULE, [], []). + +register(CollectorPid, Q) -> + gen_server:call(CollectorPid, {register, Q}, infinity). + +delete_all(CollectorPid) -> + gen_server:call(CollectorPid, delete_all, infinity). + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, #state{queues = dict:new(), delete_from = undefined}}. 
+ +%%-------------------------------------------------------------------------- + +handle_call({register, Q}, _From, + State = #state{queues = Queues, delete_from = Deleting}) -> + MonitorRef = erlang:monitor(process, Q#amqqueue.pid), + case Deleting of + undefined -> ok; + _ -> rabbit_amqqueue:delete_immediately(Q) + end, + {reply, ok, State#state{queues = dict:store(MonitorRef, Q, Queues)}}; + +handle_call(delete_all, From, State = #state{queues = Queues, + delete_from = undefined}) -> + case dict:size(Queues) of + 0 -> {reply, ok, State#state{delete_from = From}}; + _ -> [rabbit_amqqueue:delete_immediately(Q) + || {_MRef, Q} <- dict:to_list(Queues)], + {noreply, State#state{delete_from = From}} + end. + +handle_cast(Msg, State) -> + {stop, {unhandled_cast, Msg}, State}. + +handle_info({'DOWN', MonitorRef, process, _DownPid, _Reason}, + State = #state{queues = Queues, delete_from = Deleting}) -> + Queues1 = dict:erase(MonitorRef, Queues), + case Deleting =/= undefined andalso dict:size(Queues1) =:= 0 of + true -> gen_server:reply(Deleting, ok); + false -> ok + end, + {noreply, State#state{queues = Queues1}}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_queue_index.erl rabbitmq-server-2.3.1/src/rabbit_queue_index.erl --- rabbitmq-server-1.7.2/src/rabbit_queue_index.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_queue_index.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,1071 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_queue_index). + +-export([init/2, shutdown_terms/1, recover/5, + terminate/2, delete_and_terminate/1, + publish/5, deliver/2, ack/2, sync/1, sync/2, flush/1, read/3, + next_segment_boundary/1, bounds/1, recover/1]). + +-export([add_queue_ttl/0]). + +-define(CLEAN_FILENAME, "clean.dot"). + +%%---------------------------------------------------------------------------- + +%% The queue index is responsible for recording the order of messages +%% within a queue on disk. +%% +%% Because of the fact that the queue can decide at any point to send +%% a queue entry to disk, you can not rely on publishes appearing in +%% order. The only thing you can rely on is a message being published, +%% then delivered, then ack'd. +%% +%% In order to be able to clean up ack'd messages, we write to segment +%% files. These files have a fixed maximum size: ?SEGMENT_ENTRY_COUNT +%% publishes, delivers and acknowledgements. They are numbered, and so +%% it is known that the 0th segment contains messages 0 -> +%% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages +%% ?SEGMENT_ENTRY_COUNT -> 2*?SEGMENT_ENTRY_COUNT - 1 and so on. As +%% such, in the segment files, we only refer to message sequence ids +%% by the LSBs as SeqId rem ?SEGMENT_ENTRY_COUNT. This gives them a +%% fixed size. 
+%% +%% However, transient messages which are not sent to disk at any point +%% will cause gaps to appear in segment files. Therefore, we delete a +%% segment file whenever the number of publishes == number of acks +%% (note that although it is not fully enforced, it is assumed that a +%% message will never be ackd before it is delivered, thus this test +%% also implies == number of delivers). In practise, this does not +%% cause disk churn in the pathological case because of the journal +%% and caching (see below). +%% +%% Because of the fact that publishes, delivers and acks can occur all +%% over, we wish to avoid lots of seeking. Therefore we have a fixed +%% sized journal to which all actions are appended. When the number of +%% entries in this journal reaches max_journal_entries, the journal +%% entries are scattered out to their relevant files, and the journal +%% is truncated to zero size. Note that entries in the journal must +%% carry the full sequence id, thus the format of entries in the +%% journal is different to that in the segments. +%% +%% The journal is also kept fully in memory, pre-segmented: the state +%% contains a mapping from segment numbers to state-per-segment (this +%% state is held for all segments which have been "seen": thus a +%% segment which has been read but has no pending entries in the +%% journal is still held in this mapping. Also note that a dict is +%% used for this mapping, not an array because with an array, you will +%% always have entries from 0). Actions are stored directly in this +%% state. Thus at the point of flushing the journal, firstly no +%% reading from disk is necessary, but secondly if the known number of +%% acks and publishes in a segment are equal, given the known state of +%% the segment file combined with the journal, no writing needs to be +%% done to the segment file either (in fact it is deleted if it exists +%% at all). This is safe given that the set of acks is a subset of the +%% set of publishes. When it's necessary to sync messages because of +%% transactions, it's only necessary to fsync on the journal: when +%% entries are distributed from the journal to segment files, those +%% segments appended to are fsync'd prior to the journal being +%% truncated. +%% +%% This module is also responsible for scanning the queue index files +%% and seeding the message store on start up. +%% +%% Note that in general, the representation of a message's state as +%% the tuple: {('no_pub'|{Guid, MsgProps, IsPersistent}), +%% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly +%% necessary for most operations. However, for startup, and to ensure +%% the safe and correct combination of journal entries with entries +%% read from the segment on disk, this richer representation vastly +%% simplifies and clarifies the code. +%% +%% For notes on Clean Shutdown and startup, see documentation in +%% variable_queue. +%% +%%---------------------------------------------------------------------------- + +%% ---- Journal details ---- + +-define(JOURNAL_FILENAME, "journal.jif"). + +-define(PUB_PERSIST_JPREFIX, 2#00). +-define(PUB_TRANS_JPREFIX, 2#01). +-define(DEL_JPREFIX, 2#10). +-define(ACK_JPREFIX, 2#11). +-define(JPREFIX_BITS, 2). +-define(SEQ_BYTES, 8). +-define(SEQ_BITS, ((?SEQ_BYTES * 8) - ?JPREFIX_BITS)). + +%% ---- Segment details ---- + +-define(SEGMENT_EXTENSION, ".idx"). 
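[Editor's illustration, not part of the patch.] To make the segment arithmetic described above concrete: with ?SEGMENT_ENTRY_COUNT = 16384, a sequence id splits into a segment number and a 14-bit relative sequence id, and can be reconstructed without loss (this mirrors seq_id_to_seg_and_rel_seq_id/1 and reconstruct_seq_id/2 later in this file):

    -define(SEGMENT_ENTRY_COUNT, 16384).

    seq_to_seg_rel(SeqId)    -> {SeqId div ?SEGMENT_ENTRY_COUNT,
                                 SeqId rem ?SEGMENT_ENTRY_COUNT}.
    seg_rel_to_seq(Seg, Rel) -> Seg * ?SEGMENT_ENTRY_COUNT + Rel.

    %% e.g. seq_to_seg_rel(16385) =:= {1, 1}, and seg_rel_to_seq(1, 1) =:= 16385.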
+ +%% TODO: The segment size would be configurable, but deriving all the +%% other values is quite hairy and quite possibly noticably less +%% efficient, depending on how clever the compiler is when it comes to +%% binary generation/matching with constant vs variable lengths. + +-define(REL_SEQ_BITS, 14). +-define(SEGMENT_ENTRY_COUNT, 16384). %% trunc(math:pow(2,?REL_SEQ_BITS))). + +%% seq only is binary 00 followed by 14 bits of rel seq id +%% (range: 0 - 16383) +-define(REL_SEQ_ONLY_PREFIX, 00). +-define(REL_SEQ_ONLY_PREFIX_BITS, 2). +-define(REL_SEQ_ONLY_ENTRY_LENGTH_BYTES, 2). + +%% publish record is binary 1 followed by a bit for is_persistent, +%% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits +%% of md5sum msg id +-define(PUBLISH_PREFIX, 1). +-define(PUBLISH_PREFIX_BITS, 1). + +-define(EXPIRY_BYTES, 8). +-define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)). +-define(NO_EXPIRY, 0). + +-define(GUID_BYTES, 16). %% md5sum is 128 bit or 16 bytes +-define(GUID_BITS, (?GUID_BYTES * 8)). +%% 16 bytes for md5sum + 8 for expiry + 2 for seq, bits and prefix +-define(PUBLISH_RECORD_LENGTH_BYTES, ?GUID_BYTES + ?EXPIRY_BYTES + 2). + +%% 1 publish, 1 deliver, 1 ack per msg +-define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * + (?PUBLISH_RECORD_LENGTH_BYTES + + (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). + +%% ---- misc ---- + +-define(PUB, {_, _, _}). %% {Guid, MsgProps, IsPersistent} + +-define(READ_MODE, [binary, raw, read]). +-define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]). +-define(WRITE_MODE, [write | ?READ_MODE]). + +%%---------------------------------------------------------------------------- + +-record(qistate, { dir, segments, journal_handle, dirty_count, + max_journal_entries, on_sync, unsynced_guids }). + +-record(segment, { num, path, journal_entries, unacked }). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-rabbit_upgrade({add_queue_ttl, []}). + +-ifdef(use_specs). + +-type(hdl() :: ('undefined' | any())). +-type(segment() :: ('undefined' | + #segment { num :: non_neg_integer(), + path :: file:filename(), + journal_entries :: array(), + unacked :: non_neg_integer() + })). +-type(seq_id() :: integer()). +-type(seg_dict() :: {dict(), [segment()]}). +-type(on_sync_fun() :: fun ((gb_set()) -> ok)). +-type(qistate() :: #qistate { dir :: file:filename(), + segments :: 'undefined' | seg_dict(), + journal_handle :: hdl(), + dirty_count :: integer(), + max_journal_entries :: non_neg_integer(), + on_sync :: on_sync_fun(), + unsynced_guids :: [rabbit_guid:guid()] + }). +-type(startup_fun_state() :: + {fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A}), + A}). +-type(shutdown_terms() :: [any()]). + +-spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()). +-spec(shutdown_terms/1 :: (rabbit_amqqueue:name()) -> shutdown_terms()). +-spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(), + fun ((rabbit_guid:guid()) -> boolean()), on_sync_fun()) -> + {'undefined' | non_neg_integer(), qistate()}). +-spec(terminate/2 :: ([any()], qistate()) -> qistate()). +-spec(delete_and_terminate/1 :: (qistate()) -> qistate()). +-spec(publish/5 :: (rabbit_guid:guid(), seq_id(), + rabbit_types:message_properties(), boolean(), qistate()) + -> qistate()). +-spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()). +-spec(ack/2 :: ([seq_id()], qistate()) -> qistate()). +-spec(sync/2 :: ([seq_id()], qistate()) -> qistate()). +-spec(flush/1 :: (qistate()) -> qistate()). 
+-spec(read/3 :: (seq_id(), seq_id(), qistate()) -> + {[{rabbit_guid:guid(), seq_id(), + rabbit_types:message_properties(), + boolean(), boolean()}], qistate()}). +-spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). +-spec(bounds/1 :: (qistate()) -> + {non_neg_integer(), non_neg_integer(), qistate()}). +-spec(recover/1 :: ([rabbit_amqqueue:name()]) -> + {[[any()]], startup_fun_state()}). + +-spec(add_queue_ttl/0 :: () -> 'ok'). + +-endif. + + +%%---------------------------------------------------------------------------- +%% public API +%%---------------------------------------------------------------------------- + +init(Name, OnSyncFun) -> + State = #qistate { dir = Dir } = blank_state(Name), + false = filelib:is_file(Dir), %% is_file == is file or dir + State #qistate { on_sync = OnSyncFun }. + +shutdown_terms(Name) -> + #qistate { dir = Dir } = blank_state(Name), + case read_shutdown_terms(Dir) of + {error, _} -> []; + {ok, Terms1} -> Terms1 + end. + +recover(Name, Terms, MsgStoreRecovered, ContainsCheckFun, OnSyncFun) -> + State = #qistate { dir = Dir } = blank_state(Name), + State1 = State #qistate { on_sync = OnSyncFun }, + CleanShutdown = detect_clean_shutdown(Dir), + case CleanShutdown andalso MsgStoreRecovered of + true -> RecoveredCounts = proplists:get_value(segments, Terms, []), + init_clean(RecoveredCounts, State1); + false -> init_dirty(CleanShutdown, ContainsCheckFun, State1) + end. + +terminate(Terms, State) -> + {SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State), + store_clean_shutdown([{segments, SegmentCounts} | Terms], Dir), + State1. + +delete_and_terminate(State) -> + {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State), + ok = rabbit_misc:recursive_delete([Dir]), + State1. + +publish(Guid, SeqId, MsgProps, IsPersistent, + State = #qistate { unsynced_guids = UnsyncedGuids }) + when is_binary(Guid) -> + ?GUID_BYTES = size(Guid), + {JournalHdl, State1} = get_journal_handle( + State #qistate { + unsynced_guids = [Guid | UnsyncedGuids] }), + ok = file_handle_cache:append( + JournalHdl, [<<(case IsPersistent of + true -> ?PUB_PERSIST_JPREFIX; + false -> ?PUB_TRANS_JPREFIX + end):?JPREFIX_BITS, + SeqId:?SEQ_BITS>>, + create_pub_record_body(Guid, MsgProps)]), + maybe_flush_journal( + add_to_journal(SeqId, {Guid, MsgProps, IsPersistent}, State1)). + +deliver(SeqIds, State) -> + deliver_or_ack(del, SeqIds, State). + +ack(SeqIds, State) -> + deliver_or_ack(ack, SeqIds, State). + +%% This is only called when there are outstanding confirms and the +%% queue is idle. +sync(State = #qistate { unsynced_guids = Guids }) -> + sync_if([] =/= Guids, State). + +sync(SeqIds, State) -> + %% The SeqIds here contains the SeqId of every publish and ack in + %% the transaction. Ideally we should go through these seqids and + %% only sync the journal if the pubs or acks appear in the + %% journal. However, this would be complex to do, and given that + %% the variable queue publishes and acks to the qi, and then + %% syncs, all in one operation, there is no possibility of the + %% seqids not being in the journal, provided the transaction isn't + %% emptied (handled by sync_if anyway). + sync_if([] =/= SeqIds, State). + +flush(State = #qistate { dirty_count = 0 }) -> State; +flush(State) -> flush_journal(State). + +read(StartEnd, StartEnd, State) -> + {[], State}; +read(Start, End, State = #qistate { segments = Segments, + dir = Dir }) when Start =< End -> + %% Start is inclusive, End is exclusive. 
+ LowerB = {StartSeg, _StartRelSeq} = seq_id_to_seg_and_rel_seq_id(Start), + UpperB = {EndSeg, _EndRelSeq} = seq_id_to_seg_and_rel_seq_id(End - 1), + {Messages, Segments1} = + lists:foldr(fun (Seg, Acc) -> + read_bounded_segment(Seg, LowerB, UpperB, Acc, Dir) + end, {[], Segments}, lists:seq(StartSeg, EndSeg)), + {Messages, State #qistate { segments = Segments1 }}. + +next_segment_boundary(SeqId) -> + {Seg, _RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId), + reconstruct_seq_id(Seg + 1, 0). + +bounds(State = #qistate { segments = Segments }) -> + %% This is not particularly efficient, but only gets invoked on + %% queue initialisation. + SegNums = lists:sort(segment_nums(Segments)), + %% Don't bother trying to figure out the lowest seq_id, merely the + %% seq_id of the start of the lowest segment. That seq_id may not + %% actually exist, but that's fine. The important thing is that + %% the segment exists and the seq_id reported is on a segment + %% boundary. + %% + %% We also don't really care about the max seq_id. Just start the + %% next segment: it makes life much easier. + %% + %% SegNums is sorted, ascending. + {LowSeqId, NextSeqId} = + case SegNums of + [] -> {0, 0}; + [MinSeg|_] -> {reconstruct_seq_id(MinSeg, 0), + reconstruct_seq_id(1 + lists:last(SegNums), 0)} + end, + {LowSeqId, NextSeqId, State}. + +recover(DurableQueues) -> + DurableDict = dict:from_list([ {queue_name_to_dir_name(Queue), Queue} || + Queue <- DurableQueues ]), + QueuesDir = queues_dir(), + QueueDirNames = all_queue_directory_names(QueuesDir), + DurableDirectories = sets:from_list(dict:fetch_keys(DurableDict)), + {DurableQueueNames, DurableTerms} = + lists:foldl( + fun (QueueDirName, {DurableAcc, TermsAcc}) -> + QueueDirPath = filename:join(QueuesDir, QueueDirName), + case sets:is_element(QueueDirName, DurableDirectories) of + true -> + TermsAcc1 = + case read_shutdown_terms(QueueDirPath) of + {error, _} -> TermsAcc; + {ok, Terms} -> [Terms | TermsAcc] + end, + {[dict:fetch(QueueDirName, DurableDict) | DurableAcc], + TermsAcc1}; + false -> + ok = rabbit_misc:recursive_delete([QueueDirPath]), + {DurableAcc, TermsAcc} + end + end, {[], []}, QueueDirNames), + {DurableTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}. + +all_queue_directory_names(Dir) -> + case file:list_dir(Dir) of + {ok, Entries} -> [ Entry || Entry <- Entries, + filelib:is_dir( + filename:join(Dir, Entry)) ]; + {error, enoent} -> [] + end. + +%%---------------------------------------------------------------------------- +%% startup and shutdown +%%---------------------------------------------------------------------------- + +blank_state(QueueName) -> + Dir = filename:join(queues_dir(), queue_name_to_dir_name(QueueName)), + {ok, MaxJournal} = + application:get_env(rabbit, queue_index_max_journal_entries), + #qistate { dir = Dir, + segments = segments_new(), + journal_handle = undefined, + dirty_count = 0, + max_journal_entries = MaxJournal, + on_sync = fun (_) -> ok end, + unsynced_guids = [] }. + +clean_file_name(Dir) -> filename:join(Dir, ?CLEAN_FILENAME). + +detect_clean_shutdown(Dir) -> + case file:delete(clean_file_name(Dir)) of + ok -> true; + {error, enoent} -> false + end. + +read_shutdown_terms(Dir) -> + rabbit_misc:read_term_file(clean_file_name(Dir)). + +store_clean_shutdown(Terms, Dir) -> + CleanFileName = clean_file_name(Dir), + ok = filelib:ensure_dir(CleanFileName), + rabbit_misc:write_term_file(CleanFileName, Terms). + +init_clean(RecoveredCounts, State) -> + %% Load the journal. 
Since this is a clean recovery this (almost)
+    %% gets us back to where we were on shutdown.
+    State1 = #qistate { dir = Dir, segments = Segments } = load_journal(State),
+    %% The journal loading only creates records for segments touched
+    %% by the journal, and the counts are based on the journal entries
+    %% only. We need *complete* counts for *all* segments. By an
+    %% amazing coincidence we stored that information on shutdown.
+    Segments1 =
+        lists:foldl(
+          fun ({Seg, UnackedCount}, SegmentsN) ->
+                  Segment = segment_find_or_new(Seg, Dir, SegmentsN),
+                  segment_store(Segment #segment { unacked = UnackedCount },
+                                SegmentsN)
+          end, Segments, RecoveredCounts),
+    %% the counts above include transient messages, which would be the
+    %% wrong thing to return
+    {undefined, State1 # qistate { segments = Segments1 }}.
+
+init_dirty(CleanShutdown, ContainsCheckFun, State) ->
+    %% Recover the journal completely. This will also load segments
+    %% which have entries in the journal and remove duplicates. The
+    %% counts will correctly reflect the combination of the segment
+    %% and the journal.
+    State1 = #qistate { dir = Dir, segments = Segments } =
+        recover_journal(State),
+    {Segments1, Count} =
+        %% Load each segment in turn and filter out messages that are
+        %% not in the msg_store, by adding acks to the journal. These
+        %% acks only go to the RAM journal as it doesn't matter if we
+        %% lose them. Also mark delivered if not clean shutdown. Also
+        %% find the number of unacked messages.
+        lists:foldl(
+          fun (Seg, {Segments2, CountAcc}) ->
+                  Segment = #segment { unacked = UnackedCount } =
+                      recover_segment(ContainsCheckFun, CleanShutdown,
+                                      segment_find_or_new(Seg, Dir, Segments2)),
+                  {segment_store(Segment, Segments2), CountAcc + UnackedCount}
+          end, {Segments, 0}, all_segment_nums(State1)),
+    %% Unconditionally flush since the dirty_count doesn't get updated
+    %% by the above foldl.
+    State2 = flush_journal(State1 #qistate { segments = Segments1 }),
+    {Count, State2}.
+
+terminate(State = #qistate { journal_handle = JournalHdl,
+                             segments = Segments }) ->
+    ok = case JournalHdl of
+             undefined -> ok;
+             _         -> file_handle_cache:close(JournalHdl)
+         end,
+    SegmentCounts =
+        segment_fold(
+          fun (#segment { num = Seg, unacked = UnackedCount }, Acc) ->
+                  [{Seg, UnackedCount} | Acc]
+          end, [], Segments),
+    {SegmentCounts, State #qistate { journal_handle = undefined,
+                                     segments = undefined }}.
+
+recover_segment(ContainsCheckFun, CleanShutdown,
+                Segment = #segment { journal_entries = JEntries }) ->
+    {SegEntries, UnackedCount} = load_segment(false, Segment),
+    {SegEntries1, UnackedCountDelta} =
+        segment_plus_journal(SegEntries, JEntries),
+    array:sparse_foldl(
+      fun (RelSeq, {{Guid, _MsgProps, _IsPersistent}, Del, no_ack}, Segment1) ->
+              recover_message(ContainsCheckFun(Guid), CleanShutdown,
+                              Del, RelSeq, Segment1)
+      end,
+      Segment #segment { unacked = UnackedCount + UnackedCountDelta },
+      SegEntries1).
+
+recover_message( true,  true,   _Del, _RelSeq, Segment) ->
+    Segment;
+recover_message( true, false,    del, _RelSeq, Segment) ->
+    Segment;
+recover_message( true, false, no_del,  RelSeq, Segment) ->
+    add_to_journal(RelSeq, del, Segment);
+recover_message(false,     _,    del,  RelSeq, Segment) ->
+    add_to_journal(RelSeq, ack, Segment);
+recover_message(false,     _, no_del,  RelSeq, Segment) ->
+    add_to_journal(RelSeq, ack, add_to_journal(RelSeq, del, Segment)).
+
+queue_name_to_dir_name(Name = #resource { kind = queue }) ->
+    <<Num:128>> = erlang:md5(term_to_binary(Name)),
+    lists:flatten(io_lib:format("~.36B", [Num])).
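[Editor's illustration, not part of the patch.] An aside on queue_name_to_dir_name/1 above: the queue's #resource{} record is hashed with MD5 and the 128-bit result is rendered in base 36, which yields a short, filesystem-safe directory name:

    dir_name_for(Term) ->
        <<Num:128>> = erlang:md5(term_to_binary(Term)),
        %% "~.36B" prints the integer in base 36, giving at most 25 characters.
        lists:flatten(io_lib:format("~.36B", [Num])).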
+
+queues_dir() ->
+    filename:join(rabbit_mnesia:dir(), "queues").
+
+%%----------------------------------------------------------------------------
+%% msg store startup delta function
+%%----------------------------------------------------------------------------
+
+queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) ->
+    {ok, Gatherer} = gatherer:start_link(),
+    [begin
+         ok = gatherer:fork(Gatherer),
+         ok = worker_pool:submit_async(
+                fun () -> queue_index_walker_reader(QueueName, Gatherer)
+                end)
+     end || QueueName <- DurableQueues],
+    queue_index_walker({next, Gatherer});
+
+queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
+    case gatherer:out(Gatherer) of
+        empty ->
+            ok = gatherer:stop(Gatherer),
+            ok = rabbit_misc:unlink_and_capture_exit(Gatherer),
+            finished;
+        {value, {Guid, Count}} ->
+            {Guid, Count, {next, Gatherer}}
+    end.
+
+queue_index_walker_reader(QueueName, Gatherer) ->
+    State = #qistate { segments = Segments, dir = Dir } =
+        recover_journal(blank_state(QueueName)),
+    [ok = segment_entries_foldr(
+            fun (_RelSeq, {{Guid, _MsgProps, true}, _IsDelivered, no_ack},
+                 ok) ->
+                    gatherer:in(Gatherer, {Guid, 1});
+                (_RelSeq, _Value, Acc) ->
+                    Acc
+            end, ok, segment_find_or_new(Seg, Dir, Segments)) ||
+        Seg <- all_segment_nums(State)],
+    {_SegmentCounts, _State} = terminate(State),
+    ok = gatherer:finish(Gatherer).
+
+%%----------------------------------------------------------------------------
+%% expiry/binary manipulation
+%%----------------------------------------------------------------------------
+
+create_pub_record_body(Guid, #message_properties{expiry = Expiry}) ->
+    [Guid, expiry_to_binary(Expiry)].
+
+expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
+expiry_to_binary(Expiry)    -> <<Expiry:?EXPIRY_BITS>>.
+
+read_pub_record_body(Hdl) ->
+    case file_handle_cache:read(Hdl, ?GUID_BYTES + ?EXPIRY_BYTES) of
+        {ok, Bin} ->
+            %% work around for binary data fragmentation. See
+            %% rabbit_msg_file:read_next/2
+            <<GuidNum:?GUID_BITS, Expiry:?EXPIRY_BITS>> = Bin,
+            <<Guid:?GUID_BYTES/binary>> = <<GuidNum:?GUID_BITS>>,
+            Exp = case Expiry of
+                      ?NO_EXPIRY -> undefined;
+                      X          -> X
+                  end,
+            {Guid, #message_properties{expiry = Exp}};
+        Error ->
+            Error
+    end.
+
+%%----------------------------------------------------------------------------
+%% journal manipulation
+%%----------------------------------------------------------------------------
+
+add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
+                                                 segments = Segments,
+                                                 dir = Dir }) ->
+    {Seg, RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+    Segment = segment_find_or_new(Seg, Dir, Segments),
+    Segment1 = add_to_journal(RelSeq, Action, Segment),
+    State #qistate { dirty_count = DCount + 1,
+                     segments = segment_store(Segment1, Segments) };
+
+add_to_journal(RelSeq, Action,
+               Segment = #segment { journal_entries = JEntries,
+                                    unacked = UnackedCount }) ->
+    Segment1 = Segment #segment {
+                 journal_entries = add_to_journal(RelSeq, Action, JEntries) },
+    case Action of
+        del  -> Segment1;
+        ack  -> Segment1 #segment { unacked = UnackedCount - 1 };
+        ?PUB -> Segment1 #segment { unacked = UnackedCount + 1 }
+    end;
+
+add_to_journal(RelSeq, Action, JEntries) ->
+    Val = case array:get(RelSeq, JEntries) of
+              undefined ->
+                  case Action of
+                      ?PUB -> {Action, no_del, no_ack};
+                      del  -> {no_pub, del, no_ack};
+                      ack  -> {no_pub, no_del, ack}
+                  end;
+              ({Pub, no_del, no_ack}) when Action == del ->
+                  {Pub, del, no_ack};
+              ({Pub, Del, no_ack}) when Action == ack ->
+                  {Pub, Del, ack}
+          end,
+    array:set(RelSeq, Val, JEntries).
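[Editor's illustration, not part of the patch.] A standalone sketch of the fixed-width publish record body handled by create_pub_record_body/2 and read_pub_record_body/1 above: a 16-byte guid followed by a 64-bit expiry, with 0 reserved to mean "no expiry":

    encode_body(Guid, undefined) when byte_size(Guid) =:= 16 ->
        <<Guid/binary, 0:64>>;
    encode_body(Guid, Expiry) when byte_size(Guid) =:= 16, Expiry > 0 ->
        <<Guid/binary, Expiry:64>>.

    decode_body(<<Guid:16/binary, 0:64>>)      -> {Guid, undefined};
    decode_body(<<Guid:16/binary, Expiry:64>>) -> {Guid, Expiry}.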
+
+maybe_flush_journal(State = #qistate { dirty_count = DCount,
+                                       max_journal_entries = MaxJournal })
+  when DCount > MaxJournal ->
+    flush_journal(State);
+maybe_flush_journal(State) ->
+    State.
+
+flush_journal(State = #qistate { segments = Segments }) ->
+    Segments1 =
+        segment_fold(
+          fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
+                  case filelib:is_file(Path) of
+                      true  -> ok = file:delete(Path);
+                      false -> ok
+                  end,
+                  SegmentsN;
+              (#segment {} = Segment, SegmentsN) ->
+                  segment_store(append_journal_to_segment(Segment), SegmentsN)
+          end, segments_new(), Segments),
+    {JournalHdl, State1} =
+        get_journal_handle(State #qistate { segments = Segments1 }),
+    ok = file_handle_cache:clear(JournalHdl),
+    notify_sync(State1 #qistate { dirty_count = 0 }).
+
+append_journal_to_segment(#segment { journal_entries = JEntries,
+                                     path = Path } = Segment) ->
+    case array:sparse_size(JEntries) of
+        0 -> Segment;
+        _ -> {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
+                                                [{write_buffer, infinity}]),
+             array:sparse_foldl(fun write_entry_to_segment/3, Hdl, JEntries),
+             ok = file_handle_cache:close(Hdl),
+             Segment #segment { journal_entries = array_new() }
+    end.
+
+get_journal_handle(State = #qistate { journal_handle = undefined,
+                                      dir = Dir }) ->
+    Path = filename:join(Dir, ?JOURNAL_FILENAME),
+    ok = filelib:ensure_dir(Path),
+    {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
+                                       [{write_buffer, infinity}]),
+    {Hdl, State #qistate { journal_handle = Hdl }};
+get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
+    {Hdl, State}.
+
+%% Loading Journal. This isn't idempotent and will mess up the counts
+%% if you call it more than once on the same state. Assumes the counts
+%% are 0 to start with.
+load_journal(State) ->
+    {JournalHdl, State1} = get_journal_handle(State),
+    {ok, 0} = file_handle_cache:position(JournalHdl, 0),
+    load_journal_entries(State1).
+
+%% ditto
+recover_journal(State) ->
+    State1 = #qistate { segments = Segments } = load_journal(State),
+    Segments1 =
+        segment_map(
+          fun (Segment = #segment { journal_entries = JEntries,
+                                    unacked = UnackedCountInJournal }) ->
+                  %% We want to keep ack'd entries in so that we can
+                  %% remove them if duplicates are in the journal. The
+                  %% counts here are purely from the segment itself.
+                  {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
+                  {JEntries1, UnackedCountDuplicates} =
+                      journal_minus_segment(JEntries, SegEntries),
+                  Segment #segment { journal_entries = JEntries1,
+                                     unacked = (UnackedCountInJournal +
+                                                UnackedCountInSeg -
+                                                UnackedCountDuplicates) }
+          end, Segments),
+    State1 #qistate { segments = Segments1 }.
+
+load_journal_entries(State = #qistate { journal_handle = Hdl }) ->
+    case file_handle_cache:read(Hdl, ?SEQ_BYTES) of
+        {ok, <<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>} ->
+            case Prefix of
+                ?DEL_JPREFIX ->
+                    load_journal_entries(add_to_journal(SeqId, del, State));
+                ?ACK_JPREFIX ->
+                    load_journal_entries(add_to_journal(SeqId, ack, State));
+                _ ->
+                    case read_pub_record_body(Hdl) of
+                        {Guid, MsgProps} ->
+                            Publish = {Guid, MsgProps,
+                                       case Prefix of
+                                           ?PUB_PERSIST_JPREFIX -> true;
+                                           ?PUB_TRANS_JPREFIX   -> false
+                                       end},
+                            load_journal_entries(
+                              add_to_journal(SeqId, Publish, State));
+                        _ErrOrEoF -> %% err, we've lost at least a publish
+                            State
+                    end
+            end;
+        _ErrOrEoF -> State
+    end.
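[Editor's illustration, not part of the patch.] The journal entries parsed by load_journal_entries/1 above are 8 bytes each: a 2-bit action prefix followed by a 62-bit sequence id, with publish entries additionally followed by the pub record body. A minimal decoder for the prefix:

    -define(PUB_PERSIST_JPREFIX, 2#00).
    -define(PUB_TRANS_JPREFIX,   2#01).
    -define(DEL_JPREFIX,         2#10).
    -define(ACK_JPREFIX,         2#11).

    decode_entry(<<Prefix:2, SeqId:62>>) ->
        Action = case Prefix of
                     ?DEL_JPREFIX         -> del;
                     ?ACK_JPREFIX         -> ack;
                     ?PUB_PERSIST_JPREFIX -> {pub, persistent};
                     ?PUB_TRANS_JPREFIX   -> {pub, transient}
                 end,
        {Action, SeqId}.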
+
+deliver_or_ack(_Kind, [], State) ->
+    State;
+deliver_or_ack(Kind, SeqIds, State) ->
+    JPrefix = case Kind of ack -> ?ACK_JPREFIX; del -> ?DEL_JPREFIX end,
+    {JournalHdl, State1} = get_journal_handle(State),
+    ok = file_handle_cache:append(
+           JournalHdl,
+           [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> || SeqId <- SeqIds]),
+    maybe_flush_journal(lists:foldl(fun (SeqId, StateN) ->
+                                            add_to_journal(SeqId, Kind, StateN)
+                                    end, State1, SeqIds)).
+
+sync_if(false, State) ->
+    State;
+sync_if(_Bool, State = #qistate { journal_handle = undefined }) ->
+    State;
+sync_if(true, State = #qistate { journal_handle = JournalHdl }) ->
+    ok = file_handle_cache:sync(JournalHdl),
+    notify_sync(State).
+
+notify_sync(State = #qistate { unsynced_guids = UG, on_sync = OnSyncFun }) ->
+    OnSyncFun(gb_sets:from_list(UG)),
+    State #qistate { unsynced_guids = [] }.
+
+%%----------------------------------------------------------------------------
+%% segment manipulation
+%%----------------------------------------------------------------------------
+
+seq_id_to_seg_and_rel_seq_id(SeqId) ->
+    { SeqId div ?SEGMENT_ENTRY_COUNT, SeqId rem ?SEGMENT_ENTRY_COUNT }.
+
+reconstruct_seq_id(Seg, RelSeq) ->
+    (Seg * ?SEGMENT_ENTRY_COUNT) + RelSeq.
+
+all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
+    lists:sort(
+      sets:to_list(
+        lists:foldl(
+          fun (SegName, Set) ->
+                  sets:add_element(
+                    list_to_integer(
+                      lists:takewhile(fun (C) -> $0 =< C andalso C =< $9 end,
+                                      SegName)), Set)
+          end, sets:from_list(segment_nums(Segments)),
+          filelib:wildcard("*" ++ ?SEGMENT_EXTENSION, Dir)))).
+
+segment_find_or_new(Seg, Dir, Segments) ->
+    case segment_find(Seg, Segments) of
+        {ok, Segment} -> Segment;
+        error         -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
+                         Path = filename:join(Dir, SegName),
+                         #segment { num = Seg,
+                                    path = Path,
+                                    journal_entries = array_new(),
+                                    unacked = 0 }
+    end.
+
+segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
+    {ok, Segment}; %% 1 or (2, matches head)
+segment_find(Seg, {_Segments, [_, Segment = #segment { num = Seg }]}) ->
+    {ok, Segment}; %% 2, matches tail
+segment_find(Seg, {Segments, _}) -> %% no match
+    dict:find(Seg, Segments).
+
+segment_store(Segment = #segment { num = Seg }, %% 1 or (2, matches head)
+              {Segments, [#segment { num = Seg } | Tail]}) ->
+    {Segments, [Segment | Tail]};
+segment_store(Segment = #segment { num = Seg }, %% 2, matches tail
+              {Segments, [SegmentA, #segment { num = Seg }]}) ->
+    {Segments, [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg }, {Segments, []}) ->
+    {dict:erase(Seg, Segments), [Segment]};
+segment_store(Segment = #segment { num = Seg }, {Segments, [SegmentA]}) ->
+    {dict:erase(Seg, Segments), [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg },
+              {Segments, [SegmentA, SegmentB]}) ->
+    {dict:store(SegmentB#segment.num, SegmentB, dict:erase(Seg, Segments)),
+     [Segment, SegmentA]}.
+
+segment_fold(Fun, Acc, {Segments, CachedSegments}) ->
+    dict:fold(fun (_Seg, Segment, Acc1) -> Fun(Segment, Acc1) end,
+              lists:foldl(Fun, Acc, CachedSegments), Segments).
+
+segment_map(Fun, {Segments, CachedSegments}) ->
+    {dict:map(fun (_Seg, Segment) -> Fun(Segment) end, Segments),
+     lists:map(Fun, CachedSegments)}.
+
+segment_nums({Segments, CachedSegments}) ->
+    lists:map(fun (#segment { num = Num }) -> Num end, CachedSegments) ++
+        dict:fetch_keys(Segments).
+
+segments_new() ->
+    {dict:new(), []}.
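[Editor's illustration, not part of the patch.] segment_find/2 and segment_store/2 above keep the two most recently used segments in a short list in front of the dict, since queue activity clusters at both ends of the queue. A toy version of the same most-recently-used-2 cache over plain {Key, Value} pairs:

    cache_find(Key, {_Dict, [{Key, V} | _]}) -> {ok, V};
    cache_find(Key, {_Dict, [_, {Key, V}]})  -> {ok, V};
    cache_find(Key, {Dict, _Cache})          -> dict:find(Key, Dict).

    cache_store(Key, V, {Dict, [{Key, _} | Tail]}) ->       %% head hit
        {Dict, [{Key, V} | Tail]};
    cache_store(Key, V, {Dict, [Other, {Key, _}]}) ->       %% tail hit
        {Dict, [{Key, V}, Other]};
    cache_store(Key, V, {Dict, [A, {BK, BV}]}) ->           %% full: demote B
        {dict:store(BK, BV, dict:erase(Key, Dict)), [{Key, V}, A]};
    cache_store(Key, V, {Dict, Cache}) ->                   %% not yet full
        {dict:erase(Key, Dict), [{Key, V} | Cache]}.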
+
+write_entry_to_segment(_RelSeq, {?PUB, del, ack}, Hdl) ->
+    Hdl;
+write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) ->
+    ok = case Pub of
+             no_pub ->
+                 ok;
+             {Guid, MsgProps, IsPersistent} ->
+                 file_handle_cache:append(
+                   Hdl, [<<?PUBLISH_PREFIX:?PUBLISH_PREFIX_BITS,
+                           (bool_to_int(IsPersistent)):1,
+                           RelSeq:?REL_SEQ_BITS>>,
+                         create_pub_record_body(Guid, MsgProps)])
+         end,
+    ok = case {Del, Ack} of
+             {no_del, no_ack} ->
+                 ok;
+             _ ->
+                 Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                            RelSeq:?REL_SEQ_BITS>>,
+                 file_handle_cache:append(
+                   Hdl, case {Del, Ack} of
+                            {del, ack} -> [Binary, Binary];
+                            _          -> Binary
+                        end)
+         end,
+    Hdl.
+
+read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
+                     {Messages, Segments}, Dir) ->
+    Segment = segment_find_or_new(Seg, Dir, Segments),
+    {segment_entries_foldr(
+       fun (RelSeq, {{Guid, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc)
+           when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
+                (Seg < EndSeg orelse EndRelSeq >= RelSeq) ->
+               [ {Guid, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
+                  IsPersistent, IsDelivered == del} | Acc ];
+           (_RelSeq, _Value, Acc) ->
+               Acc
+       end, Messages, Segment),
+     segment_store(Segment, Segments)}.
+
+segment_entries_foldr(Fun, Init,
+                      Segment = #segment { journal_entries = JEntries }) ->
+    {SegEntries, _UnackedCount} = load_segment(false, Segment),
+    {SegEntries1, _UnackedCountD} = segment_plus_journal(SegEntries, JEntries),
+    array:sparse_foldr(Fun, Init, SegEntries1).
+
+%% Loading segments
+%%
+%% Does not do any combining with the journal at all.
+load_segment(KeepAcked, #segment { path = Path }) ->
+    case filelib:is_file(Path) of
+        false -> {array_new(), 0};
+        true  -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []),
+                 {ok, 0} = file_handle_cache:position(Hdl, bof),
+                 Res = load_segment_entries(KeepAcked, Hdl, array_new(), 0),
+                 ok = file_handle_cache:close(Hdl),
+                 Res
+    end.
+
+load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) ->
+    case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of
+        {ok, <<?PUBLISH_PREFIX:?PUBLISH_PREFIX_BITS,
+               IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>} ->
+            {Guid, MsgProps} = read_pub_record_body(Hdl),
+            Obj = {{Guid, MsgProps, 1 == IsPersistentNum}, no_del, no_ack},
+            SegEntries1 = array:set(RelSeq, Obj, SegEntries),
+            load_segment_entries(KeepAcked, Hdl, SegEntries1,
+                                 UnackedCount + 1);
+        {ok, <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+               RelSeq:?REL_SEQ_BITS>>} ->
+            {UnackedCountDelta, SegEntries1} =
+                case array:get(RelSeq, SegEntries) of
+                    {Pub, no_del, no_ack} ->
+                        { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)};
+                    {Pub, del, no_ack} when KeepAcked ->
+                        {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)};
+                    {_Pub, del, no_ack} ->
+                        {-1, array:reset(RelSeq, SegEntries)}
+                end,
+            load_segment_entries(KeepAcked, Hdl, SegEntries1,
+                                 UnackedCount + UnackedCountDelta);
+        _ErrOrEoF ->
+            {SegEntries, UnackedCount}
+    end.
+
+array_new() ->
+    array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
+
+bool_to_int(true ) -> 1;
+bool_to_int(false) -> 0.
+
+%%----------------------------------------------------------------------------
+%% journal & segment combination
+%%----------------------------------------------------------------------------
+
+%% Combine what we have just read from a segment file with what we're
+%% holding for that segment in memory. There must be no duplicates.
+segment_plus_journal(SegEntries, JEntries) -> + array:sparse_foldl( + fun (RelSeq, JObj, {SegEntriesOut, AdditionalUnacked}) -> + SegEntry = array:get(RelSeq, SegEntriesOut), + {Obj, AdditionalUnackedDelta} = + segment_plus_journal1(SegEntry, JObj), + {case Obj of + undefined -> array:reset(RelSeq, SegEntriesOut); + _ -> array:set(RelSeq, Obj, SegEntriesOut) + end, + AdditionalUnacked + AdditionalUnackedDelta} + end, {SegEntries, 0}, JEntries). + +%% Here, the result is a tuple with the first element containing the +%% item which we may be adding to (for items only in the journal), +%% modifying in (bits in both), or, when returning 'undefined', +%% erasing from (ack in journal, not segment) the segment array. The +%% other element of the tuple is the delta for AdditionalUnacked. +segment_plus_journal1(undefined, {?PUB, no_del, no_ack} = Obj) -> + {Obj, 1}; +segment_plus_journal1(undefined, {?PUB, del, no_ack} = Obj) -> + {Obj, 1}; +segment_plus_journal1(undefined, {?PUB, del, ack}) -> + {undefined, 0}; + +segment_plus_journal1({?PUB = Pub, no_del, no_ack}, {no_pub, del, no_ack}) -> + {{Pub, del, no_ack}, 0}; +segment_plus_journal1({?PUB, no_del, no_ack}, {no_pub, del, ack}) -> + {undefined, -1}; +segment_plus_journal1({?PUB, del, no_ack}, {no_pub, no_del, ack}) -> + {undefined, -1}. + +%% Remove from the journal entries for a segment, items that are +%% duplicates of entries found in the segment itself. Used on start up +%% to clean up the journal. +journal_minus_segment(JEntries, SegEntries) -> + array:sparse_foldl( + fun (RelSeq, JObj, {JEntriesOut, UnackedRemoved}) -> + SegEntry = array:get(RelSeq, SegEntries), + {Obj, UnackedRemovedDelta} = + journal_minus_segment1(JObj, SegEntry), + {case Obj of + keep -> JEntriesOut; + undefined -> array:reset(RelSeq, JEntriesOut); + _ -> array:set(RelSeq, Obj, JEntriesOut) + end, + UnackedRemoved + UnackedRemovedDelta} + end, {JEntries, 0}, JEntries). + +%% Here, the result is a tuple with the first element containing the +%% item we are adding to or modifying in the (initially fresh) journal +%% array. If the item is 'undefined' we leave the journal array +%% alone. The other element of the tuple is the deltas for +%% UnackedRemoved. + +%% Both the same. 
Must be at least the publish
+journal_minus_segment1({?PUB, _Del, no_ack} = Obj, Obj) ->
+    {undefined, 1};
+journal_minus_segment1({?PUB, _Del, ack} = Obj,    Obj) ->
+    {undefined, 0};
+
+%% Just publish in journal
+journal_minus_segment1({?PUB, no_del, no_ack},     undefined) ->
+    {keep, 0};
+
+%% Publish and deliver in journal
+journal_minus_segment1({?PUB, del, no_ack},        undefined) ->
+    {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, no_ack},  {Pub, no_del, no_ack}) ->
+    {{no_pub, del, no_ack}, 1};
+
+%% Publish, deliver and ack in journal
+journal_minus_segment1({?PUB, del, ack},           undefined) ->
+    {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, ack},     {Pub, no_del, no_ack}) ->
+    {{no_pub, del, ack}, 1};
+journal_minus_segment1({?PUB = Pub, del, ack},     {Pub, del, no_ack}) ->
+    {{no_pub, no_del, ack}, 1};
+
+%% Just deliver in journal
+journal_minus_segment1({no_pub, del, no_ack},      {?PUB, no_del, no_ack}) ->
+    {keep, 0};
+journal_minus_segment1({no_pub, del, no_ack},      {?PUB, del, no_ack}) ->
+    {undefined, 0};
+
+%% Just ack in journal
+journal_minus_segment1({no_pub, no_del, ack},      {?PUB, del, no_ack}) ->
+    {keep, 0};
+journal_minus_segment1({no_pub, no_del, ack},      {?PUB, del, ack}) ->
+    {undefined, -1};
+
+%% Deliver and ack in journal
+journal_minus_segment1({no_pub, del, ack},         {?PUB, no_del, no_ack}) ->
+    {keep, 0};
+journal_minus_segment1({no_pub, del, ack},         {?PUB, del, no_ack}) ->
+    {{no_pub, no_del, ack}, 0};
+journal_minus_segment1({no_pub, del, ack},         {?PUB, del, ack}) ->
+    {undefined, -1}.
+
+%%----------------------------------------------------------------------------
+%% upgrade
+%%----------------------------------------------------------------------------
+
+add_queue_ttl() ->
+    foreach_queue_index({fun add_queue_ttl_journal/1,
+                         fun add_queue_ttl_segment/1}).
+
+add_queue_ttl_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>) ->
+    {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>) ->
+    {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Guid:?GUID_BYTES/binary, Rest/binary>>) ->
+    {[<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Guid,
+      expiry_to_binary(undefined)], Rest};
+add_queue_ttl_journal(_) ->
+    stop.
+
+add_queue_ttl_segment(<<?PUBLISH_PREFIX:?PUBLISH_PREFIX_BITS,
+                        IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+                        Guid:?GUID_BYTES/binary, Rest/binary>>) ->
+    {[<<?PUBLISH_PREFIX:?PUBLISH_PREFIX_BITS, IsPersistentNum:1,
+        RelSeq:?REL_SEQ_BITS>>, Guid, expiry_to_binary(undefined)], Rest};
+add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                        RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+    {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+     Rest};
+add_queue_ttl_segment(_) ->
+    stop.
+
+%%----------------------------------------------------------------------------
+
+foreach_queue_index(Funs) ->
+    QueuesDir = queues_dir(),
+    QueueDirNames = all_queue_directory_names(QueuesDir),
+    {ok, Gatherer} = gatherer:start_link(),
+    [begin
+         ok = gatherer:fork(Gatherer),
+         ok = worker_pool:submit_async(
+                fun () ->
+                        transform_queue(filename:join(QueuesDir, QueueDirName),
+                                        Gatherer, Funs)
+                end)
+     end || QueueDirName <- QueueDirNames],
+    empty = gatherer:out(Gatherer),
+    ok = gatherer:stop(Gatherer),
+    ok = rabbit_misc:unlink_and_capture_exit(Gatherer).
+
+transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
+    ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
+    [ok = transform_file(filename:join(Dir, Seg), SegmentFun)
+     || Seg <- filelib:wildcard("*" ++ ?SEGMENT_EXTENSION, Dir)],
+    ok = gatherer:finish(Gatherer).
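[Editor's illustration, not part of the patch.] The add_queue_ttl/0 upgrade above widens every publish record by appending an 8-byte expiry field, driven over each file by transform_file/2 below. A simplified, macro-free sketch of rewriting one old-format segment publish entry, with 0 encoding "no expiry":

    %% Old entry: 1-bit publish prefix (1), 1-bit persistence flag, 14-bit
    %% relative sequence id, 16-byte guid. New format appends 64 expiry bits.
    widen_publish(<<1:1, IsPersistent:1, RelSeq:14, Guid:16/binary,
                    Rest/binary>>) ->
        {[<<1:1, IsPersistent:1, RelSeq:14>>, Guid, <<0:64>>], Rest};
    widen_publish(_Other) ->
        stop.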
+ +transform_file(Path, Fun) -> + PathTmp = Path ++ ".upgrade", + case filelib:file_size(Path) of + 0 -> ok; + Size -> {ok, PathTmpHdl} = + file_handle_cache:open(PathTmp, ?WRITE_MODE, + [{write_buffer, infinity}]), + + {ok, PathHdl} = file_handle_cache:open( + Path, [{read_ahead, Size} | ?READ_MODE], []), + {ok, Content} = file_handle_cache:read(PathHdl, Size), + ok = file_handle_cache:close(PathHdl), + + ok = drive_transform_fun(Fun, PathTmpHdl, Content), + + ok = file_handle_cache:close(PathTmpHdl), + ok = file:rename(PathTmp, Path) + end. + +drive_transform_fun(Fun, Hdl, Contents) -> + case Fun(Contents) of + stop -> ok; + {Output, Contents1} -> ok = file_handle_cache:append(Hdl, Output), + drive_transform_fun(Fun, Hdl, Contents1) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_reader.erl rabbitmq-server-2.3.1/src/rabbit_reader.erl --- rabbitmq-server-1.7.2/src/rabbit_reader.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_reader.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,64 +1,61 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% All Rights Reserved. +%% The Original Code is RabbitMQ. %% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_reader). -include("rabbit_framing.hrl"). -include("rabbit.hrl"). --export([start_link/0, info_keys/0, info/1, info/2, shutdown/2]). +-export([start_link/3, info_keys/0, info/1, info/2, shutdown/2]). -export([system_continue/3, system_terminate/4, system_code_change/4]). --export([init/1, mainloop/3]). +-export([init/4, mainloop/2]). --export([analyze_frame/2]). +-export([conserve_memory/2, server_properties/0]). --import(gen_tcp). --import(fprof). --import(inet). --import(prim_inet). 
+-export([process_channel_frame/5]). %% used by erlang-client + +-export([emit_stats/1]). -define(HANDSHAKE_TIMEOUT, 10). -define(NORMAL_TIMEOUT, 3). -define(CLOSING_TIMEOUT, 1). -define(CHANNEL_TERMINATION_TIMEOUT, 3). +-define(SILENT_CLOSE_DELAY, 3). +-define(FRAME_MAX, 131072). %% set to zero once QPid fix their negotiation %--------------------------------------------------------------------------- --record(v1, {sock, connection, callback, recv_ref, connection_state}). +-record(v1, {parent, sock, connection, callback, recv_length, recv_ref, + connection_state, queue_collector, heartbeater, stats_timer, + channel_sup_sup_pid, start_heartbeat_fun, auth_mechanism, + auth_state}). + +-define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt, + send_pend, state, channels]). + +-define(CREATION_EVENT_KEYS, [pid, address, port, peer_address, peer_port, ssl, + peer_cert_subject, peer_cert_issuer, + peer_cert_validity, auth_mechanism, + ssl_protocol, ssl_key_exchange, + ssl_cipher, ssl_hash, + protocol, user, vhost, timeout, frame_max, + client_properties]). --define(INFO_KEYS, - [pid, address, port, peer_address, peer_port, - recv_oct, recv_cnt, send_oct, send_cnt, send_pend, - state, channels, user, vhost, timeout, frame_max, client_properties]). +-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]). %% connection lifecycle %% @@ -96,8 +93,21 @@ %% -> log error, mark channel as closing, *running* %% handshake_timeout -> ignore, *running* %% heartbeat timeout -> *throw* +%% conserve_memory=true -> *blocking* +%% blocking: +%% conserve_memory=true -> *blocking* +%% conserve_memory=false -> *running* +%% receive a method frame for a content-bearing method +%% -> process, stop receiving, *blocked* +%% ...rest same as 'running' +%% blocked: +%% conserve_memory=true -> *blocked* +%% conserve_memory=false -> resume receiving, *running* +%% ...rest same as 'running' %% closing: %% socket close -> *terminate* +%% receive connection.close -> send connection.close_ok, +%% *closing* %% receive frame -> ignore, *closing* %% handshake_timeout -> ignore, *closing* %% heartbeat timeout -> *throw* @@ -114,6 +124,8 @@ %% start terminate_connection timer, *closed* %% closed: %% socket close -> *terminate* +%% receive connection.close -> send connection.close_ok, +%% *closed* %% receive connection.close_ok -> self() ! terminate_connection, %% *closed* %% receive frame -> ignore, *closed* @@ -125,34 +137,57 @@ %% %% TODO: refactor the code so that the above is obvious +-define(IS_RUNNING(State), + (State#v1.connection_state =:= running orelse + State#v1.connection_state =:= blocking orelse + State#v1.connection_state =:= blocked)). + %%---------------------------------------------------------------------------- -ifdef(use_specs). --spec(info_keys/0 :: () -> [info_key()]). --spec(info/1 :: (pid()) -> [info()]). --spec(info/2 :: (pid(), [info_key()]) -> [info()]). +-spec(start_link/3 :: (pid(), pid(), rabbit_heartbeat:start_heartbeat_fun()) -> + rabbit_types:ok(pid())). +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (pid()) -> rabbit_types:infos()). +-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()). +-spec(emit_stats/1 :: (pid()) -> 'ok'). -spec(shutdown/2 :: (pid(), string()) -> 'ok'). +-spec(conserve_memory/2 :: (pid(), boolean()) -> 'ok'). +-spec(server_properties/0 :: () -> rabbit_framing:amqp_table()). 
+ +%% These specs only exists to add no_return() to keep dialyzer happy +-spec(init/4 :: (pid(), pid(), pid(), rabbit_heartbeat:start_heartbeat_fun()) + -> no_return()). +-spec(start_connection/7 :: + (pid(), pid(), pid(), rabbit_heartbeat:start_heartbeat_fun(), any(), + rabbit_net:socket(), + fun ((rabbit_net:socket()) -> + rabbit_types:ok_or_error2( + rabbit_net:socket(), any()))) -> no_return()). -endif. %%-------------------------------------------------------------------------- -start_link() -> - {ok, proc_lib:spawn_link(?MODULE, init, [self()])}. +start_link(ChannelSupSupPid, Collector, StartHeartbeatFun) -> + {ok, proc_lib:spawn_link(?MODULE, init, [self(), ChannelSupSupPid, + Collector, StartHeartbeatFun])}. shutdown(Pid, Explanation) -> gen_server:call(Pid, {shutdown, Explanation}, infinity). -init(Parent) -> +init(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun) -> Deb = sys:debug_options([]), receive {go, Sock, SockTransform} -> - start_connection(Parent, Deb, Sock, SockTransform) + start_connection( + Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, Sock, + SockTransform) end. system_continue(Parent, Deb, State) -> - ?MODULE:mainloop(Parent, Deb, State). + ?MODULE:mainloop(Deb, State#v1{parent = Parent}). system_terminate(Reason, _Parent, _Deb, _State) -> exit(Reason). @@ -171,32 +206,39 @@ {error, Error} -> throw(Error) end. -setup_profiling() -> - Value = rabbit_misc:get_config(profiling_enabled, false), - case Value of - once -> - rabbit_log:info("Enabling profiling for this connection, " - "and disabling for subsequent.~n"), - rabbit_misc:set_config(profiling_enabled, false), - fprof:trace(start); - true -> - rabbit_log:info("Enabling profiling for this connection.~n"), - fprof:trace(start); - false -> - ok - end, - Value. +emit_stats(Pid) -> + gen_server:cast(Pid, emit_stats). -teardown_profiling(Value) -> - case Value of - false -> - ok; - _ -> - rabbit_log:info("Completing profiling for this connection.~n"), - fprof:trace(stop), - fprof:profile(), - fprof:analyse([{dest, []}, {cols, 100}]) - end. +conserve_memory(Pid, Conserve) -> + Pid ! {conserve_memory, Conserve}, + ok. + +server_properties() -> + {ok, Product} = application:get_key(rabbit, id), + {ok, Version} = application:get_key(rabbit, vsn), + + %% Get any configuration-specified server properties + {ok, RawConfigServerProps} = application:get_env(rabbit, + server_properties), + + %% Normalize the simplifed (2-tuple) and unsimplified (3-tuple) forms + %% from the config and merge them with the generated built-in properties + NormalizedConfigServerProps = + [case X of + {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)), + longstr, + list_to_binary(Value)}; + {BinKey, Type, Value} -> {BinKey, Type, Value} + end || X <- RawConfigServerProps ++ + [{product, Product}, + {version, Version}, + {platform, "Erlang/OTP"}, + {copyright, ?COPYRIGHT_MESSAGE}, + {information, ?INFORMATION_MESSAGE}]], + + %% Filter duplicated properties in favor of config file provided values + lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end, + NormalizedConfigServerProps). inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F). @@ -210,29 +252,41 @@ exit(normal) end. 
-start_connection(Parent, Deb, Sock, SockTransform) -> +start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, + Sock, SockTransform) -> process_flag(trap_exit, true), {PeerAddress, PeerPort} = socket_op(Sock, fun rabbit_net:peername/1), - PeerAddressS = inet_parse:ntoa(PeerAddress), + PeerAddressS = rabbit_misc:ntoab(PeerAddress), rabbit_log:info("starting TCP connection ~p from ~s:~p~n", [self(), PeerAddressS, PeerPort]), ClientSock = socket_op(Sock, SockTransform), erlang:send_after(?HANDSHAKE_TIMEOUT * 1000, self(), handshake_timeout), - ProfilingValue = setup_profiling(), try - mainloop(Parent, Deb, switch_callback( - #v1{sock = ClientSock, - connection = #connection{ - user = none, - timeout_sec = ?HANDSHAKE_TIMEOUT, - frame_max = ?FRAME_MIN_SIZE, - vhost = none, - client_properties = none}, - callback = uninitialized_callback, - recv_ref = none, - connection_state = pre_init}, - handshake, 8)) + mainloop(Deb, switch_callback( + #v1{parent = Parent, + sock = ClientSock, + connection = #connection{ + protocol = none, + user = none, + timeout_sec = ?HANDSHAKE_TIMEOUT, + frame_max = ?FRAME_MIN_SIZE, + vhost = none, + client_properties = none}, + callback = uninitialized_callback, + recv_length = 0, + recv_ref = none, + connection_state = pre_init, + queue_collector = Collector, + heartbeater = none, + stats_timer = + rabbit_event:init_stats_timer(), + channel_sup_sup_pid = ChannelSupSupPid, + start_heartbeat_fun = StartHeartbeatFun, + auth_mechanism = none, + auth_state = none + }, + handshake, 8)) catch Ex -> (if Ex == connection_closed_abruptly -> fun rabbit_log:warning/2; @@ -249,19 +303,15 @@ %% output to be sent, which results in unnecessary delays. %% %% gen_tcp:close(ClientSock), - teardown_profiling(ProfilingValue) + rabbit_event:notify(connection_closed, [{pid, self()}]) end, done. -mainloop(Parent, Deb, State = #v1{sock= Sock, recv_ref = Ref}) -> - %%?LOGDEBUG("Reader mainloop: ~p bytes available, need ~p~n", [HaveBytes, WaitUntilNBytes]), +mainloop(Deb, State = #v1{parent = Parent, sock= Sock, recv_ref = Ref}) -> receive {inet_async, Sock, Ref, {ok, Data}} -> - {State1, Callback1, Length1} = - handle_input(State#v1.callback, Data, - State#v1{recv_ref = none}), - mainloop(Parent, Deb, - switch_callback(State1, Callback1, Length1)); + mainloop(Deb, handle_input(State#v1.callback, Data, + State#v1{recv_ref = none})); {inet_async, Sock, Ref, {error, closed}} -> if State#v1.connection_state =:= closed -> State; @@ -270,6 +320,8 @@ end; {inet_async, Sock, Ref, {error, Reason}} -> throw({inet_error, Reason}); + {conserve_memory, Conserve} -> + mainloop(Deb, internal_conserve_memory(Conserve, State)); {'EXIT', Parent, Reason} -> terminate(io_lib:format("broker forced connection closure " "with reason '~w'", [Reason]), State), @@ -282,19 +334,19 @@ %% since this termination is initiated by our parent it is %% probably more important to exit quickly. 
exit(Reason); - {channel_exit, _Chan, E = {writer, send_failed, _Error}} -> + {channel_exit, _Channel, E = {writer, send_failed, _Error}} -> throw(E); {channel_exit, Channel, Reason} -> - mainloop(Parent, Deb, handle_channel_exit(Channel, Reason, State)); - {'EXIT', Pid, Reason} -> - mainloop(Parent, Deb, handle_dependent_exit(Pid, Reason, State)); + mainloop(Deb, handle_exception(State, Channel, Reason)); + {'DOWN', _MRef, process, ChPid, Reason} -> + mainloop(Deb, handle_dependent_exit(ChPid, Reason, State)); terminate_connection -> State; handshake_timeout -> - if State#v1.connection_state =:= running orelse + if ?IS_RUNNING(State) orelse State#v1.connection_state =:= closing orelse State#v1.connection_state =:= closed -> - mainloop(Parent, Deb, State); + mainloop(Deb, State); true -> throw({handshake_timeout, State#v1.callback}) end; @@ -305,16 +357,19 @@ gen_server:reply(From, ok), case ForceTermination of force -> ok; - normal -> mainloop(Parent, Deb, NewState) + normal -> mainloop(Deb, NewState) end; {'$gen_call', From, info} -> gen_server:reply(From, infos(?INFO_KEYS, State)), - mainloop(Parent, Deb, State); + mainloop(Deb, State); {'$gen_call', From, {info, Items}} -> gen_server:reply(From, try {ok, infos(Items, State)} catch Error -> {error, Error} end), - mainloop(Parent, Deb, State); + mainloop(Deb, State); + {'$gen_cast', emit_stats} -> + State1 = internal_emit_stats(State), + mainloop(Deb, State1); {system, From, Request} -> sys:handle_system_msg(Request, From, Parent, ?MODULE, Deb, State); @@ -323,21 +378,44 @@ exit({unexpected_message, Other}) end. -switch_callback(OldState, NewCallback, Length) -> +switch_callback(State = #v1{connection_state = blocked, + heartbeater = Heartbeater}, Callback, Length) -> + ok = rabbit_heartbeat:pause_monitor(Heartbeater), + State#v1{callback = Callback, recv_length = Length, recv_ref = none}; +switch_callback(State, Callback, Length) -> Ref = inet_op(fun () -> rabbit_net:async_recv( - OldState#v1.sock, Length, infinity) end), - OldState#v1{callback = NewCallback, - recv_ref = Ref}. + State#v1.sock, Length, infinity) end), + State#v1{callback = Callback, recv_length = Length, recv_ref = Ref}. -terminate(Explanation, State = #v1{connection_state = running}) -> +terminate(Explanation, State) when ?IS_RUNNING(State) -> {normal, send_exception(State, 0, rabbit_misc:amqp_error( connection_forced, Explanation, [], none))}; terminate(_Explanation, State) -> {force, State}. -close_connection(State = #v1{connection = #connection{ +internal_conserve_memory(true, State = #v1{connection_state = running}) -> + State#v1{connection_state = blocking}; +internal_conserve_memory(false, State = #v1{connection_state = blocking}) -> + State#v1{connection_state = running}; +internal_conserve_memory(false, State = #v1{connection_state = blocked, + heartbeater = Heartbeater, + callback = Callback, + recv_length = Length, + recv_ref = none}) -> + ok = rabbit_heartbeat:resume_monitor(Heartbeater), + switch_callback(State#v1{connection_state = running}, Callback, Length); +internal_conserve_memory(_Conserve, State) -> + State. + +close_connection(State = #v1{queue_collector = Collector, + connection = #connection{ timeout_sec = TimeoutSec}}) -> + %% The spec says "Exclusive queues may only be accessed by the + %% current connection, and are deleted when that connection + %% closes." This does not strictly imply synchrony, but in + %% practice it seems to be what people assume. 
+ rabbit_queue_collector:delete_all(Collector), %% We terminate the connection after the specified interval, but %% no later than ?CLOSING_TIMEOUT seconds. TimeoutMillisec = @@ -352,30 +430,32 @@ put({channel, Channel}, closing), State. -handle_channel_exit(Channel, Reason, State) -> - handle_exception(State, Channel, Reason). +handle_dependent_exit(ChPid, Reason, State) -> + case termination_kind(Reason) of + controlled -> + erase({ch_pid, ChPid}), + maybe_close(State); + uncontrolled -> + case channel_cleanup(ChPid) of + undefined -> exit({abnormal_dependent_exit, ChPid, Reason}); + Channel -> maybe_close( + handle_exception(State, Channel, Reason)) + end + end. -handle_dependent_exit(Pid, normal, State) -> - erase({chpid, Pid}), - maybe_close(State); -handle_dependent_exit(Pid, Reason, State) -> - case channel_cleanup(Pid) of - undefined -> exit({abnormal_dependent_exit, Pid, Reason}); - Channel -> maybe_close(handle_exception(State, Channel, Reason)) - end. - -channel_cleanup(Pid) -> - case get({chpid, Pid}) of - undefined -> undefined; - {channel, Channel} -> erase({channel, Channel}), - erase({chpid, Pid}), - Channel +channel_cleanup(ChPid) -> + case get({ch_pid, ChPid}) of + undefined -> undefined; + Channel -> erase({channel, Channel}), + erase({ch_pid, ChPid}), + Channel end. -all_channels() -> [Pid || {{chpid, Pid},_} <- get()]. +all_channels() -> [ChPid || {{ch_pid, ChPid}, _Channel} <- get()]. terminate_channels() -> - NChannels = length([exit(Pid, normal) || Pid <- all_channels()]), + NChannels = + length([rabbit_channel:shutdown(ChPid) || ChPid <- all_channels()]), if NChannels > 0 -> Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * NChannels, TimerRef = erlang:send_after(Timeout, self(), cancel_wait), @@ -393,14 +473,15 @@ wait_for_channel_termination(N, TimerRef) -> receive - {'EXIT', Pid, Reason} -> - case channel_cleanup(Pid) of + {'DOWN', _MRef, process, ChPid, Reason} -> + case channel_cleanup(ChPid) of undefined -> - exit({abnormal_dependent_exit, Pid, Reason}); + exit({abnormal_dependent_exit, ChPid, Reason}); Channel -> - case Reason of - normal -> ok; - _ -> + case termination_kind(Reason) of + controlled -> + ok; + uncontrolled -> rabbit_log:error( "connection ~p, channel ~p - " "error while terminating:~n~p~n", @@ -412,19 +493,27 @@ exit(channel_termination_timeout) end. -maybe_close(State = #v1{connection_state = closing}) -> +maybe_close(State = #v1{connection_state = closing, + connection = #connection{protocol = Protocol}, + sock = Sock}) -> case all_channels() of - [] -> ok = send_on_channel0( - State#v1.sock, #'connection.close_ok'{}), - close_connection(State); + [] -> + NewState = close_connection(State), + ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol), + NewState; _ -> State end; maybe_close(State) -> State. -handle_frame(Type, 0, Payload, State = #v1{connection_state = CS}) +termination_kind(normal) -> controlled; +termination_kind(_) -> uncontrolled. 
+ +handle_frame(Type, 0, Payload, + State = #v1{connection_state = CS, + connection = #connection{protocol = Protocol}}) when CS =:= closing; CS =:= closed -> - case analyze_frame(Type, Payload) of + case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of {method, MethodName, FieldsBin} -> handle_method0(MethodName, FieldsBin, State); _Other -> State @@ -432,213 +521,257 @@ handle_frame(_Type, _Channel, _Payload, State = #v1{connection_state = CS}) when CS =:= closing; CS =:= closed -> State; -handle_frame(Type, 0, Payload, State) -> - case analyze_frame(Type, Payload) of +handle_frame(Type, 0, Payload, + State = #v1{connection = #connection{protocol = Protocol}}) -> + case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of error -> throw({unknown_frame, 0, Type, Payload}); heartbeat -> State; - trace -> State; {method, MethodName, FieldsBin} -> handle_method0(MethodName, FieldsBin, State); Other -> throw({unexpected_frame_on_channel0, Other}) end; -handle_frame(Type, Channel, Payload, State) -> - case analyze_frame(Type, Payload) of +handle_frame(Type, Channel, Payload, + State = #v1{connection = #connection{protocol = Protocol}}) -> + case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of error -> throw({unknown_frame, Channel, Type, Payload}); heartbeat -> throw({unexpected_heartbeat_frame, Channel}); - trace -> throw({unexpected_trace_frame, Channel}); AnalyzedFrame -> - %%?LOGDEBUG("Ch ~p Frame ~p~n", [Channel, AnalyzedFrame]), case get({channel, Channel}) of - {chpid, ChPid} -> + {ChPid, FramingState} -> + NewAState = process_channel_frame( + AnalyzedFrame, self(), + Channel, ChPid, FramingState), + put({channel, Channel}, {ChPid, NewAState}), case AnalyzedFrame of {method, 'channel.close', _} -> - erase({channel, Channel}); - _ -> ok - end, - ok = rabbit_framing_channel:process(ChPid, AnalyzedFrame), - State; + erase({channel, Channel}), + State; + {method, MethodName, _} -> + case (State#v1.connection_state =:= blocking + andalso + Protocol:method_has_content(MethodName)) of + true -> State#v1{connection_state = blocked}; + false -> State + end; + _ -> + State + end; closing -> %% According to the spec, after sending a %% channel.close we must ignore all frames except + %% channel.close and channel.close_ok. In the + %% event of a channel.close, we should send back a %% channel.close_ok. case AnalyzedFrame of {method, 'channel.close_ok', _} -> erase({channel, Channel}); + {method, 'channel.close', _} -> + %% We're already closing this channel, so + %% there's no cleanup to do (notify + %% queues, etc.) + ok = rabbit_writer:internal_send_command( + State#v1.sock, Channel, + #'channel.close_ok'{}, Protocol); _ -> ok end, State; undefined -> - case State#v1.connection_state of - running -> ok = send_to_new_channel( - Channel, AnalyzedFrame, State), - State; - Other -> throw({channel_frame_while_starting, - Channel, Other, AnalyzedFrame}) + case ?IS_RUNNING(State) of + true -> send_to_new_channel( + Channel, AnalyzedFrame, State); + false -> throw({channel_frame_while_starting, + Channel, State#v1.connection_state, + AnalyzedFrame}) end end end. 
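The memory flow control introduced in this hunk amounts to a small state machine over connection_state: the alarm moves a running connection to blocking, the first content-carrying method then parks it in blocked (no further receive is requested from the socket), and clearing the alarm resumes it. A sketch of just the transitions (illustrative only; the real logic lives in internal_conserve_memory/2 and the method_has_content check above):

    -module(flow_state_sketch).
    -export([next_state/2]).

    %% Events: {alarm, Conserve :: boolean()} from rabbit_alarm, or
    %% {method, HasContent :: boolean()} for a method frame arriving
    %% on any channel.
    next_state({alarm, true},  running)  -> blocking;
    next_state({alarm, false}, blocking) -> running;
    next_state({alarm, false}, blocked)  -> running;   %% also re-arms the socket receive
    next_state({method, true}, blocking) -> blocked;   %% stop reading from the socket
    next_state({alarm, _},  State)       -> State;
    next_state({method, _}, State)       -> State.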
-analyze_frame(?FRAME_METHOD, <<ClassId:16, MethodId:16, MethodFields/binary>>) ->
-    {method, rabbit_framing:lookup_method_name({ClassId, MethodId}), MethodFields};
-analyze_frame(?FRAME_HEADER, <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>) ->
-    {content_header, ClassId, Weight, BodySize, Properties};
-analyze_frame(?FRAME_BODY, Body) ->
-    {content_body, Body};
-analyze_frame(?FRAME_TRACE, _Body) ->
-    trace;
-analyze_frame(?FRAME_HEARTBEAT, <<>>) ->
-    heartbeat;
-analyze_frame(_Type, _Body) ->
-    error.
-
 handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32>>, State) ->
-    %%?LOGDEBUG("Got frame header: ~p/~p/~p~n", [Type, Channel, PayloadSize]),
-    {State, {frame_payload, Type, Channel, PayloadSize}, PayloadSize + 1};
+    ensure_stats_timer(
+      switch_callback(State, {frame_payload, Type, Channel, PayloadSize},
+                      PayloadSize + 1));

-handle_input({frame_payload, Type, Channel, PayloadSize}, PayloadAndMarker, State) ->
+handle_input({frame_payload, Type, Channel, PayloadSize},
+             PayloadAndMarker, State) ->
     case PayloadAndMarker of
         <<Payload:PayloadSize/binary, ?FRAME_END>> ->
-            %%?LOGDEBUG("Frame completed: ~p/~p/~p~n", [Type, Channel, Payload]),
-            NewState = handle_frame(Type, Channel, Payload, State),
-            {NewState, frame_header, 7};
+            handle_frame(Type, Channel, Payload,
+                         switch_callback(State, frame_header, 7));
         _ ->
-            throw({bad_payload, PayloadAndMarker})
+            throw({bad_payload, Type, Channel, PayloadSize, PayloadAndMarker})
     end;

-handle_input(handshake, <<"AMQP",1,1,ProtocolMajor,ProtocolMinor>>,
-             State = #v1{sock = Sock, connection = Connection}) ->
-    case check_version({ProtocolMajor, ProtocolMinor},
-                       {?PROTOCOL_VERSION_MAJOR, ?PROTOCOL_VERSION_MINOR}) of
-        true ->
-            {ok, Product} = application:get_key(id),
-            {ok, Version} = application:get_key(vsn),
-            ok = send_on_channel0(
-                   Sock,
-                   #'connection.start'{
-                     version_major = ?PROTOCOL_VERSION_MAJOR,
-                     version_minor = ?PROTOCOL_VERSION_MINOR,
-                     server_properties =
-                         [{list_to_binary(K), longstr, list_to_binary(V)} ||
-                             {K, V} <-
-                                 [{"product", Product},
-                                  {"version", Version},
-                                  {"platform", "Erlang/OTP"},
-                                  {"copyright", ?COPYRIGHT_MESSAGE},
-                                  {"information", ?INFORMATION_MESSAGE}]],
-                     mechanisms = <<"PLAIN AMQPLAIN">>,
-                     locales = <<"en_US">> }),
-            {State#v1{connection = Connection#connection{
-                                     timeout_sec = ?NORMAL_TIMEOUT},
-                      connection_state = starting},
-             frame_header, 7};
-        false ->
-            throw({bad_version, ProtocolMajor, ProtocolMinor})
-    end;
+%% The two rules pertaining to version negotiation:
+%%
+%% * If the server cannot support the protocol specified in the
+%% protocol header, it MUST respond with a valid protocol header and
+%% then close the socket connection.
+%%
+%% * The server MUST provide a protocol version that is lower than or
+%% equal to that requested by the client in the protocol header.
+handle_input(handshake, <<"AMQP", 0, 0, 9, 1>>, State) ->
+    start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is the protocol header for 0-9, which we can safely treat as
+%% though it were 0-9-1.
+handle_input(handshake, <<"AMQP", 1, 1, 0, 9>>, State) ->
+    start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
+%% defines the version as 8-0.
+handle_input(handshake, <<"AMQP", 1, 1, 8, 0>>, State) ->
+    start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% The 0-8 spec as on the AMQP web site actually has this as the
+%% protocol header; some libraries e.g., py-amqplib, send it when they
+%% want 0-8.
+handle_input(handshake, <<"AMQP", 1, 1, 9, 1>>, State) -> + start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State); + +handle_input(handshake, <<"AMQP", A, B, C, D>>, #v1{sock = Sock}) -> + refuse_connection(Sock, {bad_version, A, B, C, D}); handle_input(handshake, Other, #v1{sock = Sock}) -> - ok = inet_op(fun () -> rabbit_net:send( - Sock, <<"AMQP",1,1, - ?PROTOCOL_VERSION_MAJOR, - ?PROTOCOL_VERSION_MINOR>>) end), - throw({bad_header, Other}); + refuse_connection(Sock, {bad_header, Other}); handle_input(Callback, Data, _State) -> throw({bad_input, Callback, Data}). -%% the 0-8 spec, confusingly, defines the version as 8-0 -adjust_version({8,0}) -> {0,8}; -adjust_version(Version) -> Version. -check_version(ClientVersion, ServerVersion) -> - {ClientMajor, ClientMinor} = adjust_version(ClientVersion), - {ServerMajor, ServerMinor} = adjust_version(ServerVersion), - ClientMajor > ServerMajor - orelse - (ClientMajor == ServerMajor andalso - ClientMinor >= ServerMinor). +%% Offer a protocol version to the client. Connection.start only +%% includes a major and minor version number, Luckily 0-9 and 0-9-1 +%% are similar enough that clients will be happy with either. +start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, + Protocol, + State = #v1{sock = Sock, connection = Connection}) -> + Start = #'connection.start'{ + version_major = ProtocolMajor, + version_minor = ProtocolMinor, + server_properties = server_properties(), + mechanisms = auth_mechanisms_binary(), + locales = <<"en_US">> }, + ok = send_on_channel0(Sock, Start, Protocol), + switch_callback(State#v1{connection = Connection#connection{ + timeout_sec = ?NORMAL_TIMEOUT, + protocol = Protocol}, + connection_state = starting}, + frame_header, 7). + +refuse_connection(Sock, Exception) -> + ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",0,0,9,1>>) end), + throw(Exception). + +ensure_stats_timer(State = #v1{stats_timer = StatsTimer, + connection_state = running}) -> + Self = self(), + State#v1{stats_timer = rabbit_event:ensure_stats_timer( + StatsTimer, + fun() -> emit_stats(Self) end)}; +ensure_stats_timer(State) -> + State. %%-------------------------------------------------------------------------- -handle_method0(MethodName, FieldsBin, State) -> +handle_method0(MethodName, FieldsBin, + State = #v1{connection = #connection{protocol = Protocol}}) -> + HandleException = + fun(R) -> + case ?IS_RUNNING(State) of + true -> send_exception(State, 0, R); + %% We don't trust the client at this point - force + %% them to wait for a bit so they can't DOS us with + %% repeated failed logins etc. + false -> timer:sleep(?SILENT_CLOSE_DELAY * 1000), + throw({channel0_error, State#v1.connection_state, R}) + end + end, try - handle_method0(rabbit_framing:decode_method_fields( - MethodName, FieldsBin), + handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin), State) - catch exit:Reason -> - CompleteReason = case Reason of - #amqp_error{method = none} -> - Reason#amqp_error{method = MethodName}; - OtherReason -> OtherReason - end, - case State#v1.connection_state of - running -> send_exception(State, 0, CompleteReason); - Other -> throw({channel0_error, Other, CompleteReason}) - end + catch exit:#amqp_error{method = none} = Reason -> + HandleException(Reason#amqp_error{method = MethodName}); + Type:Reason -> + HandleException({Type, Reason, MethodName, erlang:get_stacktrace()}) end. 
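Taken together, the handshake clauses above define a fixed mapping from the client's 8-byte protocol header to a negotiated version and framing module, with anything else refused by echoing the server's own header and closing the socket. A stand-alone sketch of that mapping (illustrative, not part of the diff):

    -module(handshake_sketch).
    -export([negotiate/1]).

    %% Returns {ok, NegotiatedVersion, FramingModule} for accepted
    %% headers, or {refuse, Header} -- the reader answers an
    %% unsupported header with the newest protocol it speaks and then
    %% closes the connection.
    negotiate(<<"AMQP", 0, 0, 9, 1>>) -> {ok, {0,9,1}, rabbit_framing_amqp_0_9_1};
    negotiate(<<"AMQP", 1, 1, 0, 9>>) -> {ok, {0,9,0}, rabbit_framing_amqp_0_9_1};
    negotiate(<<"AMQP", 1, 1, 8, 0>>) -> {ok, {8,0,0}, rabbit_framing_amqp_0_8};
    negotiate(<<"AMQP", 1, 1, 9, 1>>) -> {ok, {8,0,0}, rabbit_framing_amqp_0_8};
    negotiate(_Other)                 -> {refuse, <<"AMQP",0,0,9,1>>}.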
handle_method0(#'connection.start_ok'{mechanism = Mechanism, response = Response, client_properties = ClientProperties}, - State = #v1{connection_state = starting, - connection = Connection, - sock = Sock}) -> - User = rabbit_access_control:check_login(Mechanism, Response), - ok = send_on_channel0( - Sock, - #'connection.tune'{channel_max = 0, - %% set to zero once QPid fix their negotiation - frame_max = 131072, - heartbeat = 0}), - State#v1{connection_state = tuning, - connection = Connection#connection{ - user = User, - client_properties = ClientProperties}}; -handle_method0(#'connection.tune_ok'{channel_max = _ChannelMax, - frame_max = FrameMax, + State0 = #v1{connection_state = starting, + connection = Connection, + sock = Sock}) -> + AuthMechanism = auth_mechanism_to_module(Mechanism), + State = State0#v1{auth_mechanism = AuthMechanism, + auth_state = AuthMechanism:init(Sock), + connection_state = securing, + connection = + Connection#connection{ + client_properties = ClientProperties}}, + auth_phase(Response, State); + +handle_method0(#'connection.secure_ok'{response = Response}, + State = #v1{connection_state = securing}) -> + auth_phase(Response, State); + +handle_method0(#'connection.tune_ok'{frame_max = FrameMax, heartbeat = ClientHeartbeat}, State = #v1{connection_state = tuning, connection = Connection, - sock = Sock}) -> - %% if we have a channel_max limit that the client wishes to - %% exceed, die as per spec. Not currently a problem, so we ignore - %% the client's channel_max parameter. - rabbit_heartbeat:start_heartbeat(Sock, ClientHeartbeat), - State#v1{connection_state = opening, - connection = Connection#connection{ - timeout_sec = ClientHeartbeat, - frame_max = FrameMax}}; -handle_method0(#'connection.open'{virtual_host = VHostPath, - insist = Insist}, + sock = Sock, + start_heartbeat_fun = SHF}) -> + if (FrameMax /= 0) and (FrameMax < ?FRAME_MIN_SIZE) -> + rabbit_misc:protocol_error( + not_allowed, "frame_max=~w < ~w min size", + [FrameMax, ?FRAME_MIN_SIZE]); + (?FRAME_MAX /= 0) and (FrameMax > ?FRAME_MAX) -> + rabbit_misc:protocol_error( + not_allowed, "frame_max=~w > ~w max size", + [FrameMax, ?FRAME_MAX]); + true -> + Frame = rabbit_binary_generator:build_heartbeat_frame(), + SendFun = fun() -> catch rabbit_net:send(Sock, Frame) end, + Parent = self(), + ReceiveFun = fun() -> Parent ! timeout end, + Heartbeater = SHF(Sock, ClientHeartbeat, SendFun, + ClientHeartbeat, ReceiveFun), + State#v1{connection_state = opening, + connection = Connection#connection{ + timeout_sec = ClientHeartbeat, + frame_max = FrameMax}, + heartbeater = Heartbeater} + end; + +handle_method0(#'connection.open'{virtual_host = VHostPath}, + State = #v1{connection_state = opening, connection = Connection = #connection{ - user = User}, - sock = Sock}) -> + user = User, + protocol = Protocol}, + sock = Sock, + stats_timer = StatsTimer}) -> ok = rabbit_access_control:check_vhost_access(User, VHostPath), NewConnection = Connection#connection{vhost = VHostPath}, - KnownHosts = format_listeners(rabbit_networking:active_listeners()), - Redirects = compute_redirects(Insist), - if Redirects == [] -> - ok = send_on_channel0( - Sock, - #'connection.open_ok'{known_hosts = KnownHosts}), - State#v1{connection_state = running, - connection = NewConnection}; - true -> - %% FIXME: 'host' is supposed to only contain one - %% address; but which one do we pick? This is - %% really a problem with the spec. 
- Host = format_listeners(Redirects), - rabbit_log:info("connection ~p redirecting to ~p~n", - [self(), Host]), - ok = send_on_channel0( - Sock, - #'connection.redirect'{host = Host, - known_hosts = KnownHosts}), - close_connection(State#v1{connection = NewConnection}) - end; -handle_method0(#'connection.close'{}, - State = #v1{connection_state = running}) -> - lists:foreach(fun rabbit_framing_channel:shutdown/1, all_channels()), + ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol), + State1 = internal_conserve_memory( + rabbit_alarm:register(self(), {?MODULE, conserve_memory, []}), + State#v1{connection_state = running, + connection = NewConnection}), + rabbit_event:notify(connection_created, + infos(?CREATION_EVENT_KEYS, State1)), + rabbit_event:if_enabled(StatsTimer, + fun() -> internal_emit_stats(State1) end), + State1; +handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) -> + lists:foreach(fun rabbit_channel:shutdown/1, all_channels()), maybe_close(State#v1{connection_state = closing}); +handle_method0(#'connection.close'{}, + State = #v1{connection_state = CS, + connection = #connection{protocol = Protocol}, + sock = Sock}) + when CS =:= closing; CS =:= closed -> + %% We're already closed or closing, so we don't need to cleanup + %% anything. + ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol), + State; handle_method0(#'connection.close_ok'{}, State = #v1{connection_state = closed}) -> self() ! terminate_connection, @@ -650,22 +783,62 @@ rabbit_misc:protocol_error( channel_error, "unexpected method in connection state ~w", [S]). -send_on_channel0(Sock, Method) -> - ok = rabbit_writer:internal_send_command(Sock, 0, Method). +send_on_channel0(Sock, Method, Protocol) -> + ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -format_listeners(Listeners) -> +auth_mechanism_to_module(TypeBin) -> + case rabbit_registry:binary_to_type(TypeBin) of + {error, not_found} -> + rabbit_misc:protocol_error( + command_invalid, "unknown authentication mechanism '~s'", + [TypeBin]); + T -> + case {lists:member(T, auth_mechanisms()), + rabbit_registry:lookup_module(auth_mechanism, T)} of + {true, {ok, Module}} -> + Module; + _ -> + rabbit_misc:protocol_error( + command_invalid, + "invalid authentication mechanism '~s'", [T]) + end + end. + +auth_mechanisms() -> + {ok, Configured} = application:get_env(auth_mechanisms), + [Name || {Name, _Module} <- rabbit_registry:lookup_all(auth_mechanism), + lists:member(Name, Configured)]. + +auth_mechanisms_binary() -> list_to_binary( - rabbit_misc:intersperse( - $,, - [io_lib:format("~s:~w", [Host, Port]) || - #listener{host = Host, port = Port} <- Listeners])). - -compute_redirects(true) -> []; -compute_redirects(false) -> - Node = node(), - LNode = rabbit_load:pick(), - if Node == LNode -> []; - true -> rabbit_networking:node_listeners(LNode) + string:join( + [atom_to_list(A) || A <- auth_mechanisms()], " ")). 
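As a concrete illustration of auth_mechanisms_binary/0 (the mechanism list is an assumed configuration, not taken from this diff): with 'PLAIN' and 'AMQPLAIN' both configured and registered, the mechanisms field announced in connection.start is the space-separated binary built as follows:

    %% Same computation as auth_mechanisms_binary/0, with the registry
    %% and application environment replaced by a literal list.
    MechanismsBin = list_to_binary(
                      string:join([atom_to_list(A) || A <- ['PLAIN', 'AMQPLAIN']], " ")),
    <<"PLAIN AMQPLAIN">> = MechanismsBin.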
+ +auth_phase(Response, + State = #v1{auth_mechanism = AuthMechanism, + auth_state = AuthState, + connection = Connection = + #connection{protocol = Protocol}, + sock = Sock}) -> + case AuthMechanism:handle_response(Response, AuthState) of + {refused, Msg, Args} -> + rabbit_misc:protocol_error( + access_refused, "~s login refused: ~s", + [proplists:get_value(name, AuthMechanism:description()), + io_lib:format(Msg, Args)]); + {protocol_error, Msg, Args} -> + rabbit_misc:protocol_error(syntax_error, Msg, Args); + {challenge, Challenge, AuthState1} -> + Secure = #'connection.secure'{challenge = Challenge}, + ok = send_on_channel0(Sock, Secure, Protocol), + State#v1{auth_state = AuthState1}; + {ok, User} -> + Tune = #'connection.tune'{channel_max = 0, + frame_max = ?FRAME_MAX, + heartbeat = 0}, + ok = send_on_channel0(Sock, Tune, Protocol), + State#v1{connection_state = tuning, + connection = Connection#connection{user = User}} end. %%-------------------------------------------------------------------------- @@ -675,31 +848,48 @@ i(pid, #v1{}) -> self(); i(address, #v1{sock = Sock}) -> - {ok, {A, _}} = rabbit_net:sockname(Sock), - A; + socket_info(fun rabbit_net:sockname/1, fun ({A, _}) -> A end, Sock); i(port, #v1{sock = Sock}) -> - {ok, {_, P}} = rabbit_net:sockname(Sock), - P; + socket_info(fun rabbit_net:sockname/1, fun ({_, P}) -> P end, Sock); i(peer_address, #v1{sock = Sock}) -> - {ok, {A, _}} = rabbit_net:peername(Sock), - A; + socket_info(fun rabbit_net:peername/1, fun ({A, _}) -> A end, Sock); i(peer_port, #v1{sock = Sock}) -> - {ok, {_, P}} = rabbit_net:peername(Sock), - P; + socket_info(fun rabbit_net:peername/1, fun ({_, P}) -> P end, Sock); +i(ssl, #v1{sock = Sock}) -> + rabbit_net:is_ssl(Sock); +i(ssl_protocol, #v1{sock = Sock}) -> + ssl_info(fun ({P, _}) -> P end, Sock); +i(ssl_key_exchange, #v1{sock = Sock}) -> + ssl_info(fun ({_, {K, _, _}}) -> K end, Sock); +i(ssl_cipher, #v1{sock = Sock}) -> + ssl_info(fun ({_, {_, C, _}}) -> C end, Sock); +i(ssl_hash, #v1{sock = Sock}) -> + ssl_info(fun ({_, {_, _, H}}) -> H end, Sock); +i(peer_cert_issuer, #v1{sock = Sock}) -> + cert_info(fun rabbit_ssl:peer_cert_issuer/1, Sock); +i(peer_cert_subject, #v1{sock = Sock}) -> + cert_info(fun rabbit_ssl:peer_cert_subject/1, Sock); +i(peer_cert_validity, #v1{sock = Sock}) -> + cert_info(fun rabbit_ssl:peer_cert_validity/1, Sock); i(SockStat, #v1{sock = Sock}) when SockStat =:= recv_oct; SockStat =:= recv_cnt; SockStat =:= send_oct; SockStat =:= send_cnt; SockStat =:= send_pend -> - case rabbit_net:getstat(Sock, [SockStat]) of - {ok, [{SockStat, StatVal}]} -> StatVal; - {error, einval} -> undefined; - {error, Error} -> throw({cannot_get_socket_stats, Error}) - end; + socket_info(fun () -> rabbit_net:getstat(Sock, [SockStat]) end, + fun ([{_, I}]) -> I end); i(state, #v1{connection_state = S}) -> S; i(channels, #v1{}) -> length(all_channels()); +i(protocol, #v1{connection = #connection{protocol = none}}) -> + none; +i(protocol, #v1{connection = #connection{protocol = Protocol}}) -> + Protocol:version(); +i(auth_mechanism, #v1{auth_mechanism = none}) -> + none; +i(auth_mechanism, #v1{auth_mechanism = Mechanism}) -> + proplists:get_value(name, Mechanism:description()); i(user, #v1{connection = #connection{user = #user{username = Username}}}) -> Username; i(user, #v1{connection = #connection{user = none}}) -> @@ -716,20 +906,61 @@ i(Item, #v1{}) -> throw({bad_argument, Item}). +socket_info(Get, Select, Sock) -> + socket_info(fun() -> Get(Sock) end, Select). 
+ +socket_info(Get, Select) -> + case Get() of + {ok, T} -> Select(T); + {error, _} -> '' + end. + +ssl_info(F, Sock) -> + case rabbit_net:ssl_info(Sock) of + nossl -> ''; + {error, _} -> ''; + {ok, Info} -> F(Info) + end. + +cert_info(F, Sock) -> + case rabbit_net:peercert(Sock) of + nossl -> ''; + {error, no_peercert} -> ''; + {ok, Cert} -> list_to_binary(F(Cert)) + end. + %%-------------------------------------------------------------------------- send_to_new_channel(Channel, AnalyzedFrame, State) -> - #v1{sock = Sock, connection = #connection{ - frame_max = FrameMax, - user = #user{username = Username}, - vhost = VHost}} = State, - WriterPid = rabbit_writer:start(Sock, Channel, FrameMax), - ChPid = rabbit_framing_channel:start_link( - fun rabbit_channel:start_link/5, - [Channel, self(), WriterPid, Username, VHost]), - put({channel, Channel}, {chpid, ChPid}), - put({chpid, ChPid}, {channel, Channel}), - ok = rabbit_framing_channel:process(ChPid, AnalyzedFrame). + #v1{sock = Sock, queue_collector = Collector, + channel_sup_sup_pid = ChanSupSup, + connection = #connection{protocol = Protocol, + frame_max = FrameMax, + user = User, + vhost = VHost}} = State, + {ok, _ChSupPid, {ChPid, AState}} = + rabbit_channel_sup_sup:start_channel( + ChanSupSup, {tcp, Protocol, Sock, Channel, FrameMax, self(), User, + VHost, Collector}), + erlang:monitor(process, ChPid), + NewAState = process_channel_frame(AnalyzedFrame, self(), + Channel, ChPid, AState), + put({channel, Channel}, {ChPid, NewAState}), + put({ch_pid, ChPid}, Channel), + State. + +process_channel_frame(Frame, ErrPid, Channel, ChPid, AState) -> + case rabbit_command_assembler:process(Frame, AState) of + {ok, NewAState} -> NewAState; + {ok, Method, NewAState} -> rabbit_channel:do(ChPid, Method), + NewAState; + {ok, Method, Content, NewAState} -> rabbit_channel:do(ChPid, + Method, Content), + NewAState; + {error, Reason} -> ErrPid ! {channel_exit, Channel, + Reason}, + AState + end. log_channel_error(ConnectionState, Channel, Reason) -> rabbit_log:error("connection ~p (~p), channel ~p - error:~n~p~n", @@ -742,60 +973,19 @@ log_channel_error(CS, Channel, Reason), send_exception(State, Channel, Reason). -send_exception(State, Channel, Reason) -> - {ShouldClose, CloseChannel, CloseMethod} = map_exception(Channel, Reason), +send_exception(State = #v1{connection = #connection{protocol = Protocol}}, + Channel, Reason) -> + {ShouldClose, CloseChannel, CloseMethod} = + rabbit_binary_generator:map_exception(Channel, Reason, Protocol), NewState = case ShouldClose of true -> terminate_channels(), close_connection(State); false -> close_channel(Channel, State) end, ok = rabbit_writer:internal_send_command( - NewState#v1.sock, CloseChannel, CloseMethod), + NewState#v1.sock, CloseChannel, CloseMethod, Protocol), NewState. -map_exception(Channel, Reason) -> - {SuggestedClose, ReplyCode, ReplyText, FailedMethod} = - lookup_amqp_exception(Reason), - ShouldClose = SuggestedClose or (Channel == 0), - {ClassId, MethodId} = case FailedMethod of - {_, _} -> FailedMethod; - none -> {0, 0}; - _ -> rabbit_framing:method_id(FailedMethod) - end, - {CloseChannel, CloseMethod} = - case ShouldClose of - true -> {0, #'connection.close'{reply_code = ReplyCode, - reply_text = ReplyText, - class_id = ClassId, - method_id = MethodId}}; - false -> {Channel, #'channel.close'{reply_code = ReplyCode, - reply_text = ReplyText, - class_id = ClassId, - method_id = MethodId}} - end, - {ShouldClose, CloseChannel, CloseMethod}. 
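The info helpers above share one convention worth noting: anything that cannot be determined for the current socket (a closed socket, a non-SSL connection, a missing peer certificate) is reported as the empty atom '' rather than raising, so a single failing item does not break an entire info query. A minimal illustration of the same pattern (illustrative only, not part of the diff):

    %% Turn an {ok, Value} | {error, _} result into a displayable
    %% value, defaulting to '' on failure, as socket_info/2 does.
    safe_info(Get, Select) ->
        case Get() of
            {ok, T}    -> Select(T);
            {error, _} -> ''
        end.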
-
-%% FIXME: this clause can go when we move to AMQP spec >=8.1
-lookup_amqp_exception(#amqp_error{name = precondition_failed,
-                                  explanation = Expl,
-                                  method = Method}) ->
-    ExplBin = amqp_exception_explanation(<<"PRECONDITION_FAILED">>, Expl),
-    {false, 406, ExplBin, Method};
-lookup_amqp_exception(#amqp_error{name = Name,
-                                  explanation = Expl,
-                                  method = Method}) ->
-    {ShouldClose, Code, Text} = rabbit_framing:lookup_amqp_exception(Name),
-    ExplBin = amqp_exception_explanation(Text, Expl),
-    {ShouldClose, Code, ExplBin, Method};
-lookup_amqp_exception(Other) ->
-    rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
-    {ShouldClose, Code, Text} =
-        rabbit_framing:lookup_amqp_exception(internal_error),
-    {ShouldClose, Code, Text, none}.
-
-amqp_exception_explanation(Text, Expl) ->
-    ExplBin = list_to_binary(Expl),
-    CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
-    if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
-       true -> CompleteTextBin
-    end.
+internal_emit_stats(State = #v1{stats_timer = StatsTimer}) ->
+    rabbit_event:notify(connection_stats, infos(?STATISTICS_KEYS, State)),
+    State#v1{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}.
diff -Nru rabbitmq-server-1.7.2/src/rabbit_registry.erl rabbitmq-server-2.3.1/src/rabbit_registry.erl
--- rabbitmq-server-1.7.2/src/rabbit_registry.erl 1970-01-01 00:00:00.000000000 +0000
+++ rabbitmq-server-2.3.1/src/rabbit_registry.erl 2011-02-03 12:47:35.000000000 +0000
@@ -0,0 +1,124 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved.
+%%
+
+-module(rabbit_registry).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-export([register/3, binary_to_type/1, lookup_module/2, lookup_all/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(register/3 :: (atom(), binary(), atom()) -> 'ok').
+-spec(binary_to_type/1 ::
+        (binary()) -> atom() | rabbit_types:error('not_found')).
+-spec(lookup_module/2 ::
+        (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found')).
+-spec(lookup_all/1 :: (atom()) -> [{atom(), atom()}]).
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
+start_link() ->
+    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%---------------------------------------------------------------------------
+
+register(Class, TypeName, ModuleName) ->
+    gen_server:call(?SERVER, {register, Class, TypeName, ModuleName}).
+
+%% This is used with user-supplied arguments (e.g., on exchange
+%% declare), so we restrict it to existing atoms only. This means it
+%% can throw a badarg, indicating that the type cannot have been
+%% registered.
+binary_to_type(TypeBin) when is_binary(TypeBin) -> + case catch list_to_existing_atom(binary_to_list(TypeBin)) of + {'EXIT', {badarg, _}} -> {error, not_found}; + TypeAtom -> TypeAtom + end. + +lookup_module(Class, T) when is_atom(T) -> + case ets:lookup(?ETS_NAME, {Class, T}) of + [{_, Module}] -> + {ok, Module}; + [] -> + {error, not_found} + end. + +lookup_all(Class) -> + [{K, V} || [K, V] <- ets:match(?ETS_NAME, {{Class, '$1'}, '$2'})]. + +%%--------------------------------------------------------------------------- + +internal_binary_to_type(TypeBin) when is_binary(TypeBin) -> + list_to_atom(binary_to_list(TypeBin)). + +internal_register(Class, TypeName, ModuleName) + when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) -> + ok = sanity_check_module(class_module(Class), ModuleName), + true = ets:insert(?ETS_NAME, + {{Class, internal_binary_to_type(TypeName)}, ModuleName}), + ok. + +sanity_check_module(ClassModule, Module) -> + case catch lists:member(ClassModule, + lists:flatten( + [Bs || {Attr, Bs} <- + Module:module_info(attributes), + Attr =:= behavior orelse + Attr =:= behaviour])) of + {'EXIT', {undef, _}} -> {error, not_module}; + false -> {error, {not_type, ClassModule}}; + true -> ok + end. + +class_module(exchange) -> rabbit_exchange_type; +class_module(auth_mechanism) -> rabbit_auth_mechanism. + +%%--------------------------------------------------------------------------- + +init([]) -> + ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]), + {ok, none}. + +handle_call({register, Class, TypeName, ModuleName}, _From, State) -> + ok = internal_register(Class, TypeName, ModuleName), + {reply, ok, State}; + +handle_call(Request, _From, State) -> + {stop, {unhandled_call, Request}, State}. + +handle_cast(Request, State) -> + {stop, {unhandled_cast, Request}, State}. + +handle_info(Message, State) -> + {stop, {unhandled_info, Message}, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_restartable_sup.erl rabbitmq-server-2.3.1/src/rabbit_restartable_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_restartable_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_restartable_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,32 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_restartable_sup). + +-behaviour(supervisor). + +-export([start_link/2]). + +-export([init/1]). + +-include("rabbit.hrl"). + +start_link(Name, {_M, _F, _A} = Fun) -> + supervisor:start_link({local, Name}, ?MODULE, [Fun]). + +init([{Mod, _F, _A} = Fun]) -> + {ok, {{one_for_one, 10, 10}, + [{Mod, Fun, transient, ?MAX_WAIT, worker, [Mod]}]}}. 
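A hypothetical usage of this new supervisor (the child module name below is an assumption, not something from this diff); rabbit_sup:start_restartable_child/1,2, added later in this diff, builds exactly this kind of child and derives the registered name by appending "_sup" to the module name:

    %% Wrap some_worker in its own one_for_one supervisor so it is
    %% restarted on failure without taking its siblings down.
    {ok, _SupPid} = rabbit_restartable_sup:start_link(
                      some_worker_sup, {some_worker, start_link, []}).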
diff -Nru rabbitmq-server-1.7.2/src/rabbit_router.erl rabbitmq-server-2.3.1/src/rabbit_router.erl --- rabbitmq-server-1.7.2/src/rabbit_router.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_router.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,176 +1,108 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_router). +-include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). --behaviour(gen_server2). - --export([start_link/0, - deliver/2]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --define(SERVER, ?MODULE). - -%% cross-node routing optimisation is disabled because of bug 19758. --define(BUG19758, true). +-export([deliver/2, match_bindings/2, match_routing_key/2]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --spec(start_link/0 :: () -> {'ok', pid()} | 'ignore' | {'error', any()}). --spec(deliver/2 :: ([pid()], delivery()) -> {routing_result(), [pid()]}). +-export_type([routing_key/0, routing_result/0, match_result/0]). + +-type(routing_key() :: binary()). +-type(routing_result() :: 'routed' | 'unroutable' | 'not_delivered'). +-type(qpids() :: [pid()]). +-type(match_result() :: [rabbit_types:binding_destination()]). + +-spec(deliver/2 :: ([rabbit_amqqueue:name()], rabbit_types:delivery()) -> + {routing_result(), qpids()}). +-spec(match_bindings/2 :: (rabbit_types:binding_source(), + fun ((rabbit_types:binding()) -> boolean())) -> + match_result()). 
+-spec(match_routing_key/2 :: (rabbit_types:binding_source(), + routing_key() | '_') -> match_result()). -endif. %%---------------------------------------------------------------------------- -start_link() -> - gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []). - --ifdef(BUG19758). - -deliver(QPids, Delivery) -> - check_delivery(Delivery#delivery.mandatory, Delivery#delivery.immediate, - run_bindings(QPids, Delivery)). - --else. - -deliver(QPids, Delivery) -> - %% we reduce inter-node traffic by grouping the qpids by node and - %% only delivering one copy of the message to each node involved, - %% which then in turn delivers it to its queues. - deliver_per_node( - dict:to_list( - lists:foldl( - fun (QPid, D) -> - dict:update(node(QPid), - fun (QPids1) -> [QPid | QPids1] end, - [QPid], D) - end, - dict:new(), QPids)), - Delivery). - -deliver_per_node([{Node, QPids}], Delivery) when Node == node() -> - %% optimisation - check_delivery(Delivery#delivery.mandatory, Delivery#delivery.immediate, - run_bindings(QPids, Delivery)); -deliver_per_node(NodeQPids, Delivery = #delivery{mandatory = false, - immediate = false}) -> +deliver(QNames, Delivery = #delivery{mandatory = false, + immediate = false}) -> %% optimisation: when Mandatory = false and Immediate = false, - %% rabbit_amqqueue:deliver in run_bindings below will deliver the - %% message to the queue process asynchronously, and return true, - %% which means all the QPids will always be returned. It is - %% therefore safe to use a fire-and-forget cast here and return - %% the QPids - the semantics is preserved. This scales much better - %% than the non-immediate case below. - {routed, - lists:flatmap( - fun ({Node, QPids}) -> - gen_server2:cast({?SERVER, Node}, {deliver, QPids, Delivery}), - QPids - end, - NodeQPids)}; -deliver_per_node(NodeQPids, Delivery) -> - R = rabbit_misc:upmap( - fun ({Node, QPids}) -> - try gen_server2:call({?SERVER, Node}, - {deliver, QPids, Delivery}, - infinity) - catch - _Class:_Reason -> - %% TODO: figure out what to log (and do!) here - {false, []} - end - end, - NodeQPids), + %% rabbit_amqqueue:deliver will deliver the message to the queue + %% process asynchronously, and return true, which means all the + %% QPids will always be returned. It is therefore safe to use a + %% fire-and-forget cast here and return the QPids - the semantics + %% is preserved. This scales much better than the non-immediate + %% case below. + QPids = lookup_qpids(QNames), + delegate:invoke_no_result( + QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, Delivery) end), + {routed, QPids}; + +deliver(QNames, Delivery = #delivery{mandatory = Mandatory, + immediate = Immediate}) -> + QPids = lookup_qpids(QNames), + {Success, _} = + delegate:invoke(QPids, + fun (Pid) -> + rabbit_amqqueue:deliver(Pid, Delivery) + end), {Routed, Handled} = - lists:foldl(fun ({Routed, Handled}, {RoutedAcc, HandledAcc}) -> - {Routed or RoutedAcc, - %% we do the concatenation below, which - %% should be faster - [Handled | HandledAcc]} - end, - {false, []}, - R), - check_delivery(Delivery#delivery.mandatory, Delivery#delivery.immediate, - {Routed, lists:append(Handled)}). - --endif. - -%%-------------------------------------------------------------------- - -init([]) -> - {ok, no_state}. - -handle_call({deliver, QPids, Delivery}, From, State) -> - spawn( - fun () -> - R = run_bindings(QPids, Delivery), - gen_server2:reply(From, R) - end), - {noreply, State}. 
- -handle_cast({deliver, QPids, Delivery}, State) -> - %% in order to preserve message ordering we must not spawn here - run_bindings(QPids, Delivery), - {noreply, State}. + lists:foldl(fun fold_deliveries/2, {false, []}, Success), + check_delivery(Mandatory, Immediate, {Routed, Handled}). -handle_info(_Info, State) -> - {noreply, State}. -terminate(_Reason, _State) -> - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. +%% TODO: Maybe this should be handled by a cursor instead. +%% TODO: This causes a full scan for each entry with the same source +match_bindings(SrcName, Match) -> + Query = qlc:q([DestinationName || + #route{binding = Binding = #binding{ + source = SrcName1, + destination = DestinationName}} <- + mnesia:table(rabbit_route), + SrcName == SrcName1, + Match(Binding)]), + mnesia:async_dirty(fun qlc:e/1, [Query]). + +match_routing_key(SrcName, RoutingKey) -> + MatchHead = #route{binding = #binding{source = SrcName, + destination = '$1', + key = RoutingKey, + _ = '_'}}, + mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]). %%-------------------------------------------------------------------- -run_bindings(QPids, Delivery) -> - lists:foldl( - fun (QPid, {Routed, Handled}) -> - case catch rabbit_amqqueue:deliver(QPid, Delivery) of - true -> {true, [QPid | Handled]}; - false -> {true, Handled}; - {'EXIT', _Reason} -> {Routed, Handled} - end - end, - {false, []}, - QPids). +fold_deliveries({Pid, true},{_, Handled}) -> {true, [Pid|Handled]}; +fold_deliveries({_, false},{_, Handled}) -> {true, Handled}. %% check_delivery(Mandatory, Immediate, {WasRouted, QPids}) check_delivery(true, _ , {false, []}) -> {unroutable, []}; check_delivery(_ , true, {_ , []}) -> {not_delivered, []}; check_delivery(_ , _ , {_ , Qs}) -> {routed, Qs}. + +lookup_qpids(QNames) -> + lists:foldl(fun (QName, QPids) -> + case mnesia:dirty_read({rabbit_queue, QName}) of + [#amqqueue{pid = QPid}] -> [QPid | QPids]; + [] -> QPids + end + end, [], QNames). diff -Nru rabbitmq-server-1.7.2/src/rabbit_sasl_report_file_h.erl rabbitmq-server-2.3.1/src/rabbit_sasl_report_file_h.erl --- rabbitmq-server-1.7.2/src/rabbit_sasl_report_file_h.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_sasl_report_file_h.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,39 +1,25 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. 
-%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_sasl_report_file_h). -behaviour(gen_event). --export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). +-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, + code_change/3]). %% rabbit_sasl_report_file_h is a wrapper around the sasl_report_file_h %% module because the original's init/1 does not match properly diff -Nru rabbitmq-server-1.7.2/src/rabbit_ssl.erl rabbitmq-server-2.3.1/src/rabbit_ssl.erl --- rabbitmq-server-1.7.2/src/rabbit_ssl.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_ssl.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,173 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_ssl). + +-include("rabbit.hrl"). + +-include_lib("public_key/include/public_key.hrl"). + +-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]). +-export([peer_cert_subject_item/2]). + +%%-------------------------------------------------------------------------- + +-ifdef(use_specs). + +-export_type([certificate/0]). + +-type(certificate() :: binary()). + +-spec(peer_cert_issuer/1 :: (certificate()) -> string()). +-spec(peer_cert_subject/1 :: (certificate()) -> string()). +-spec(peer_cert_validity/1 :: (certificate()) -> string()). +-spec(peer_cert_subject_item/2 :: + (certificate(), tuple()) -> string() | 'not_found'). + +-endif. + +%%-------------------------------------------------------------------------- +%% High-level functions used by reader +%%-------------------------------------------------------------------------- + +%% Return a string describing the certificate's issuer. +peer_cert_issuer(Cert) -> + cert_info(fun(#'OTPCertificate' { + tbsCertificate = #'OTPTBSCertificate' { + issuer = Issuer }}) -> + format_rdn_sequence(Issuer) + end, Cert). + +%% Return a string describing the certificate's subject, as per RFC4514. +peer_cert_subject(Cert) -> + cert_info(fun(#'OTPCertificate' { + tbsCertificate = #'OTPTBSCertificate' { + subject = Subject }}) -> + format_rdn_sequence(Subject) + end, Cert). + +%% Return a part of the certificate's subject. 
+peer_cert_subject_item(Cert, Type) -> + cert_info(fun(#'OTPCertificate' { + tbsCertificate = #'OTPTBSCertificate' { + subject = Subject }}) -> + find_by_type(Type, Subject) + end, Cert). + +%% Return a string describing the certificate's validity. +peer_cert_validity(Cert) -> + cert_info(fun(#'OTPCertificate' { + tbsCertificate = #'OTPTBSCertificate' { + validity = {'Validity', Start, End} }}) -> + lists:flatten( + io_lib:format("~s - ~s", [format_asn1_value(Start), + format_asn1_value(End)])) + end, Cert). + +%%-------------------------------------------------------------------------- + +cert_info(F, Cert) -> + F(case public_key:pkix_decode_cert(Cert, otp) of + {ok, DecCert} -> DecCert; %%pre R14B + DecCert -> DecCert %%R14B onwards + end). + +find_by_type(Type, {rdnSequence, RDNs}) -> + case [V || #'AttributeTypeAndValue'{type = T, value = V} + <- lists:flatten(RDNs), + T == Type] of + [{printableString, S}] -> S; + [] -> not_found + end. + +%%-------------------------------------------------------------------------- +%% Formatting functions +%%-------------------------------------------------------------------------- + +%% Format and rdnSequence as a RFC4514 subject string. +format_rdn_sequence({rdnSequence, Seq}) -> + string:join(lists:reverse([format_complex_rdn(RDN) || RDN <- Seq]), ","). + +%% Format an RDN set. +format_complex_rdn(RDNs) -> + string:join([format_rdn(RDN) || RDN <- RDNs], "+"). + +%% Format an RDN. If the type name is unknown, use the dotted decimal +%% representation. See RFC4514, section 2.3. +format_rdn(#'AttributeTypeAndValue'{type = T, value = V}) -> + FV = escape_rdn_value(format_asn1_value(V)), + Fmts = [{?'id-at-surname' , "SN"}, + {?'id-at-givenName' , "GIVENNAME"}, + {?'id-at-initials' , "INITIALS"}, + {?'id-at-generationQualifier' , "GENERATIONQUALIFIER"}, + {?'id-at-commonName' , "CN"}, + {?'id-at-localityName' , "L"}, + {?'id-at-stateOrProvinceName' , "ST"}, + {?'id-at-organizationName' , "O"}, + {?'id-at-organizationalUnitName' , "OU"}, + {?'id-at-title' , "TITLE"}, + {?'id-at-countryName' , "C"}, + {?'id-at-serialNumber' , "SERIALNUMBER"}, + {?'id-at-pseudonym' , "PSEUDONYM"}, + {?'id-domainComponent' , "DC"}, + {?'id-emailAddress' , "EMAILADDRESS"}, + {?'street-address' , "STREET"}], + case proplists:lookup(T, Fmts) of + {_, Fmt} -> + io_lib:format(Fmt ++ "=~s", [FV]); + none when is_tuple(T) -> + TypeL = [io_lib:format("~w", [X]) || X <- tuple_to_list(T)], + io_lib:format("~s:~s", [string:join(TypeL, "."), FV]); + none -> + io_lib:format("~p:~s", [T, FV]) + end. + +%% Escape a string as per RFC4514. +escape_rdn_value(V) -> + escape_rdn_value(V, start). + +escape_rdn_value([], _) -> + []; +escape_rdn_value([C | S], start) when C =:= $ ; C =:= $# -> + [$\\, C | escape_rdn_value(S, middle)]; +escape_rdn_value(S, start) -> + escape_rdn_value(S, middle); +escape_rdn_value([$ ], middle) -> + [$\\, $ ]; +escape_rdn_value([C | S], middle) when C =:= $"; C =:= $+; C =:= $,; C =:= $;; + C =:= $<; C =:= $>; C =:= $\\ -> + [$\\, C | escape_rdn_value(S, middle)]; +escape_rdn_value([C | S], middle) when C < 32 ; C =:= 127 -> + %% only U+0000 needs escaping, but for display purposes it's handy + %% to escape all non-printable chars + lists:flatten(io_lib:format("\\~2.16.0B", [C])) ++ + escape_rdn_value(S, middle); +escape_rdn_value([C | S], middle) -> + [C | escape_rdn_value(S, middle)]. + +%% Get the string representation of an OTPCertificate field. 
+format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; + ST =:= universalString; ST =:= utf8String; + ST =:= bmpString -> + if is_binary(S) -> binary_to_list(S); + true -> S + end; +format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, + Min1, Min2, S1, S2, $Z]}) -> + io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ", + [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]); +format_asn1_value(V) -> + io_lib:format("~p", [V]). diff -Nru rabbitmq-server-1.7.2/src/rabbit_sup.erl rabbitmq-server-2.3.1/src/rabbit_sup.erl --- rabbitmq-server-1.7.2/src/rabbit_sup.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,42 +1,30 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_sup). -behaviour(supervisor). --export([start_link/0, start_child/1, start_child/2]). +-export([start_link/0, start_child/1, start_child/2, start_child/3, + start_restartable_child/1, start_restartable_child/2, stop_child/1]). -export([init/1]). +-include("rabbit.hrl"). + -define(SERVER, ?MODULE). start_link() -> @@ -46,10 +34,31 @@ start_child(Mod, []). start_child(Mod, Args) -> + start_child(Mod, Mod, Args). + +start_child(ChildId, Mod, Args) -> {ok, _} = supervisor:start_child(?SERVER, - {Mod, {Mod, start_link, Args}, - transient, 100, worker, [Mod]}), + {ChildId, {Mod, start_link, Args}, + transient, ?MAX_WAIT, worker, [Mod]}), ok. +start_restartable_child(Mod) -> + start_restartable_child(Mod, []). 
+ +start_restartable_child(Mod, Args) -> + Name = list_to_atom(atom_to_list(Mod) ++ "_sup"), + {ok, _} = supervisor:start_child( + ?SERVER, + {Name, {rabbit_restartable_sup, start_link, + [Name, {Mod, start_link, Args}]}, + transient, infinity, supervisor, [rabbit_restartable_sup]}), + ok. + +stop_child(ChildId) -> + case supervisor:terminate_child(?SERVER, ChildId) of + ok -> supervisor:delete_child(?SERVER, ChildId); + E -> E + end. + init([]) -> - {ok, {{one_for_one, 10, 10}, []}}. + {ok, {{one_for_all, 0, 1}, []}}. diff -Nru rabbitmq-server-1.7.2/src/rabbit_tests.erl rabbitmq-server-2.3.1/src/rabbit_tests.erl --- rabbitmq-server-1.7.2/src/rabbit_tests.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_tests.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_tests). @@ -35,14 +20,13 @@ -export([all_tests/0, test_parsing/0]). -%% Exported so the hook mechanism can call back --export([handle_hook/3, bad_handle_hook/3, extra_arg_hook/5]). - --import(lists). - -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -include_lib("kernel/include/file.hrl"). +-define(PERSISTENT_MSG_STORE, msg_store_persistent). +-define(TRANSIENT_MSG_STORE, msg_store_transient). + test_content_prop_roundtrip(Datum, Binary) -> Types = [element(1, E) || E <- Datum], Values = [element(2, E) || E <- Datum], @@ -50,18 +34,69 @@ Binary = rabbit_binary_generator:encode_properties(Types, Values). 
%% assertion all_tests() -> + application:set_env(rabbit, file_handles_high_watermark, 10, infinity), + ok = file_handle_cache:set_limit(10), + passed = test_file_handle_cache(), + passed = test_backing_queue(), passed = test_priority_queue(), + passed = test_bpqueue(), passed = test_pg_local(), passed = test_unfold(), + passed = test_supervisor_delayed_restart(), passed = test_parsing(), + passed = test_content_framing(), + passed = test_content_transcoding(), passed = test_topic_matching(), passed = test_log_management(), passed = test_app_management(), passed = test_log_management_during_startup(), + passed = test_statistics(), + passed = test_option_parser(), passed = test_cluster_management(), passed = test_user_management(), passed = test_server_status(), - passed = test_hooks(), + passed = maybe_run_cluster_dependent_tests(), + passed = test_configurable_server_properties(), + passed. + +maybe_run_cluster_dependent_tests() -> + SecondaryNode = rabbit_misc:makenode("hare"), + + case net_adm:ping(SecondaryNode) of + pong -> passed = run_cluster_dependent_tests(SecondaryNode); + pang -> io:format("Skipping cluster dependent tests with node ~p~n", + [SecondaryNode]) + end, + passed. + +run_cluster_dependent_tests(SecondaryNode) -> + SecondaryNodeS = atom_to_list(SecondaryNode), + + ok = control_action(stop_app, []), + ok = control_action(reset, []), + ok = control_action(cluster, [SecondaryNodeS]), + ok = control_action(start_app, []), + + io:format("Running cluster dependent tests with node ~p~n", [SecondaryNode]), + passed = test_delegates_async(SecondaryNode), + passed = test_delegates_sync(SecondaryNode), + + %% we now run the tests remotely, so that code coverage on the + %% local node picks up more of the delegate + Node = node(), + Self = self(), + Remote = spawn(SecondaryNode, + fun () -> A = test_delegates_async(Node), + B = test_delegates_sync(Node), + Self ! {self(), {A, B}} + end), + receive + {Remote, Result} -> + Result = {passed, passed} + after 2000 -> + throw(timeout) + end, + passed. test_priority_queue() -> @@ -179,6 +214,143 @@ priority_queue:to_list(Q), priority_queue_out_all(Q)}. 
+test_bpqueue() -> + Q = bpqueue:new(), + true = bpqueue:is_empty(Q), + 0 = bpqueue:len(Q), + [] = bpqueue:to_list(Q), + + Q1 = bpqueue_test(fun bpqueue:in/3, fun bpqueue:out/1, + fun bpqueue:to_list/1, + fun bpqueue:foldl/3, fun bpqueue:map_fold_filter_l/4), + Q2 = bpqueue_test(fun bpqueue:in_r/3, fun bpqueue:out_r/1, + fun (QR) -> lists:reverse( + [{P, lists:reverse(L)} || + {P, L} <- bpqueue:to_list(QR)]) + end, + fun bpqueue:foldr/3, fun bpqueue:map_fold_filter_r/4), + + [{foo, [1, 2]}, {bar, [3]}] = bpqueue:to_list(bpqueue:join(Q, Q1)), + [{bar, [3]}, {foo, [2, 1]}] = bpqueue:to_list(bpqueue:join(Q2, Q)), + [{foo, [1, 2]}, {bar, [3, 3]}, {foo, [2,1]}] = + bpqueue:to_list(bpqueue:join(Q1, Q2)), + + [{foo, [1, 2]}, {bar, [3]}, {foo, [1, 2]}, {bar, [3]}] = + bpqueue:to_list(bpqueue:join(Q1, Q1)), + + [{foo, [1, 2]}, {bar, [3]}] = + bpqueue:to_list( + bpqueue:from_list( + [{x, []}, {foo, [1]}, {y, []}, {foo, [2]}, {bar, [3]}, {z, []}])), + + [{undefined, [a]}] = bpqueue:to_list(bpqueue:from_list([{undefined, [a]}])), + + {4, [a,b,c,d]} = + bpqueue:foldl( + fun (Prefix, Value, {Prefix, Acc}) -> + {Prefix + 1, [Value | Acc]} + end, + {0, []}, bpqueue:from_list([{0,[d]}, {1,[c]}, {2,[b]}, {3,[a]}])), + + [{bar,3}, {foo,2}, {foo,1}] = + bpqueue:foldr(fun (P, V, I) -> [{P,V} | I] end, [], Q2), + + BPQL = [{foo,[1,2,2]}, {bar,[3,4,5]}, {foo,[5,6,7]}], + BPQ = bpqueue:from_list(BPQL), + + %% no effect + {BPQL, 0} = bpqueue_mffl([none], {none, []}, BPQ), + {BPQL, 0} = bpqueue_mffl([foo,bar], {none, [1]}, BPQ), + {BPQL, 0} = bpqueue_mffl([bar], {none, [3]}, BPQ), + {BPQL, 0} = bpqueue_mffr([bar], {foo, [5]}, BPQ), + + %% process 1 item + {[{foo,[-1,2,2]}, {bar,[3,4,5]}, {foo,[5,6,7]}], 1} = + bpqueue_mffl([foo,bar], {foo, [2]}, BPQ), + {[{foo,[1,2,2]}, {bar,[-3,4,5]}, {foo,[5,6,7]}], 1} = + bpqueue_mffl([bar], {bar, [4]}, BPQ), + {[{foo,[1,2,2]}, {bar,[3,4,5]}, {foo,[5,6,-7]}], 1} = + bpqueue_mffr([foo,bar], {foo, [6]}, BPQ), + {[{foo,[1,2,2]}, {bar,[3,4]}, {baz,[-5]}, {foo,[5,6,7]}], 1} = + bpqueue_mffr([bar], {baz, [4]}, BPQ), + + %% change prefix + {[{bar,[-1,-2,-2,-3,-4,-5,-5,-6,-7]}], 9} = + bpqueue_mffl([foo,bar], {bar, []}, BPQ), + {[{bar,[-1,-2,-2,3,4,5]}, {foo,[5,6,7]}], 3} = + bpqueue_mffl([foo], {bar, [5]}, BPQ), + {[{bar,[-1,-2,-2,3,4,5,-5,-6]}, {foo,[7]}], 5} = + bpqueue_mffl([foo], {bar, [7]}, BPQ), + {[{foo,[1,2,2,-3,-4]}, {bar,[5]}, {foo,[5,6,7]}], 2} = + bpqueue_mffl([bar], {foo, [5]}, BPQ), + {[{bar,[-1,-2,-2,3,4,5,-5,-6,-7]}], 6} = + bpqueue_mffl([foo], {bar, []}, BPQ), + {[{foo,[1,2,2,-3,-4,-5,5,6,7]}], 3} = + bpqueue_mffl([bar], {foo, []}, BPQ), + + %% edge cases + {[{foo,[-1,-2,-2]}, {bar,[3,4,5]}, {foo,[5,6,7]}], 3} = + bpqueue_mffl([foo], {foo, [5]}, BPQ), + {[{foo,[1,2,2]}, {bar,[3,4,5]}, {foo,[-5,-6,-7]}], 3} = + bpqueue_mffr([foo], {foo, [2]}, BPQ), + + passed. 
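The assertions above already pin down the observable behaviour of bpqueue (the module itself is not part of this diff): it is a queue of prefix/value pairs in which consecutive entries sharing a prefix are grouped. Distilled into a minimal example in the same assertion style:

    Q = bpqueue:in(bar, 3, bpqueue:in(foo, 2, bpqueue:in(foo, 1, bpqueue:new()))),
    [{foo, [1, 2]}, {bar, [3]}] = bpqueue:to_list(Q),
    3 = bpqueue:len(Q).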
+ +bpqueue_test(In, Out, List, Fold, MapFoldFilter) -> + Q = bpqueue:new(), + {empty, _Q} = Out(Q), + + ok = Fold(fun (Prefix, Value, ok) -> {error, Prefix, Value} end, ok, Q), + {Q1M, 0} = MapFoldFilter(fun(_P) -> throw(explosion) end, + fun(_V, _N) -> throw(explosion) end, 0, Q), + [] = bpqueue:to_list(Q1M), + + Q1 = In(bar, 3, In(foo, 2, In(foo, 1, Q))), + false = bpqueue:is_empty(Q1), + 3 = bpqueue:len(Q1), + [{foo, [1, 2]}, {bar, [3]}] = List(Q1), + + {{value, foo, 1}, Q3} = Out(Q1), + {{value, foo, 2}, Q4} = Out(Q3), + {{value, bar, 3}, _Q5} = Out(Q4), + + F = fun (QN) -> + MapFoldFilter(fun (foo) -> true; + (_) -> false + end, + fun (2, _Num) -> stop; + (V, Num) -> {bar, -V, V - Num} end, + 0, QN) + end, + {Q6, 0} = F(Q), + [] = bpqueue:to_list(Q6), + {Q7, 1} = F(Q1), + [{bar, [-1]}, {foo, [2]}, {bar, [3]}] = List(Q7), + + Q1. + +bpqueue_mffl(FF1A, FF2A, BPQ) -> + bpqueue_mff(fun bpqueue:map_fold_filter_l/4, FF1A, FF2A, BPQ). + +bpqueue_mffr(FF1A, FF2A, BPQ) -> + bpqueue_mff(fun bpqueue:map_fold_filter_r/4, FF1A, FF2A, BPQ). + +bpqueue_mff(Fold, FF1A, FF2A, BPQ) -> + FF1 = fun (Prefixes) -> + fun (P) -> lists:member(P, Prefixes) end + end, + FF2 = fun ({Prefix, Stoppers}) -> + fun (Val, Num) -> + case lists:member(Val, Stoppers) of + true -> stop; + false -> {Prefix, -Val, 1 + Num} + end + end + end, + Queue_to_list = fun ({LHS, RHS}) -> {bpqueue:to_list(LHS), RHS} end, + + Queue_to_list(Fold(FF1(FF1A), FF2(FF2A), 0, BPQ)). + test_simple_n_element_queue(N) -> Items = lists:seq(1, N), Q = priority_queue_in_all(priority_queue:new(), Items), @@ -326,11 +498,94 @@ >>), passed. +%% Test that content frames don't exceed frame-max +test_content_framing(FrameMax, BodyBin) -> + [Header | Frames] = + rabbit_binary_generator:build_simple_content_frames( + 1, + rabbit_binary_generator:ensure_content_encoded( + rabbit_basic:build_content(#'P_basic'{}, BodyBin), + rabbit_framing_amqp_0_9_1), + FrameMax, + rabbit_framing_amqp_0_9_1), + %% header is formatted correctly and the size is the total of the + %% fragments + <<_FrameHeader:7/binary, _ClassAndWeight:4/binary, + BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header), + BodySize = size(BodyBin), + true = lists:all( + fun (ContentFrame) -> + FrameBinary = list_to_binary(ContentFrame), + %% assert + <<_TypeAndChannel:3/binary, + Size:32/unsigned, _Payload:Size/binary, 16#CE>> = + FrameBinary, + size(FrameBinary) =< FrameMax + end, Frames), + passed. + +test_content_framing() -> + %% no content + passed = test_content_framing(4096, <<>>), + %% easily fit in one frame + passed = test_content_framing(4096, <<"Easy">>), + %% exactly one frame (empty frame = 8 bytes) + passed = test_content_framing(11, <<"One">>), + %% more than one frame + passed = test_content_framing(11, <<"More than one frame">>), + passed. + +test_content_transcoding() -> + %% there are no guarantees provided by 'clear' - it's just a hint + ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1, + ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1, + EnsureDecoded = + fun (C0) -> + C1 = rabbit_binary_parser:ensure_content_decoded(C0), + true = C1#content.properties =/= none, + C1 + end, + EnsureEncoded = + fun (Protocol) -> + fun (C0) -> + C1 = rabbit_binary_generator:ensure_content_encoded( + C0, Protocol), + true = C1#content.properties_bin =/= none, + C1 + end + end, + %% Beyond the assertions in Ensure*, the only testable guarantee + %% is that the operations should never fail. 
+ %% + %% If we were using quickcheck we'd simply stuff all the above + %% into a generator for sequences of operations. In the absence of + %% quickcheck we pick particularly interesting sequences that: + %% + %% - execute every op twice since they are idempotent + %% - invoke clear_decoded, clear_encoded, decode and transcode + %% with one or both of decoded and encoded content present + [begin + sequence_with_content([Op]), + sequence_with_content([ClearEncoded, Op]), + sequence_with_content([ClearDecoded, Op]) + end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded, + EnsureEncoded(rabbit_framing_amqp_0_9_1), + EnsureEncoded(rabbit_framing_amqp_0_8)]], + passed. + +sequence_with_content(Sequence) -> + lists:foldl(fun (F, V) -> F(F(V)) end, + rabbit_binary_generator:ensure_content_encoded( + rabbit_basic:build_content(#'P_basic'{}, <<>>), + rabbit_framing_amqp_0_9_1), + Sequence). + test_topic_match(P, R) -> test_topic_match(P, R, true). test_topic_match(P, R, Expected) -> - case rabbit_exchange:topic_matches(list_to_binary(P), list_to_binary(R)) of + case rabbit_exchange_type_topic:topic_matches(list_to_binary(P), + list_to_binary(R)) of Expected -> passed; _ -> @@ -515,6 +770,30 @@ ok = control_action(start_app, []), passed. +test_option_parser() -> + % command and arguments should just pass through + ok = check_get_options({["mock_command", "arg1", "arg2"], []}, + [], ["mock_command", "arg1", "arg2"]), + + % get flags + ok = check_get_options( + {["mock_command", "arg1"], [{"-f", true}, {"-f2", false}]}, + [{flag, "-f"}, {flag, "-f2"}], ["mock_command", "arg1", "-f"]), + + % get options + ok = check_get_options( + {["mock_command"], [{"-foo", "bar"}, {"-baz", "notbaz"}]}, + [{option, "-foo", "notfoo"}, {option, "-baz", "notbaz"}], + ["mock_command", "-foo", "bar"]), + + % shuffled and interleaved arguments and options + ok = check_get_options( + {["a1", "a2", "a3"], [{"-o1", "hello"}, {"-o2", "noto2"}, {"-f", true}]}, + [{option, "-o1", "noto1"}, {flag, "-f"}, {option, "-o2", "noto2"}], + ["-f", "a1", "-o1", "hello", "a2", "a3"]), + + passed. 
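Two short notes on the hunk above. First, the content-framing assertions rest on the AMQP 0-9-1 wire format: a frame is 7 header octets (type, channel, 32-bit payload size) plus the payload plus the 16#CE end octet, so an empty frame is 8 octets and a frame-max of 11 leaves exactly 3 octets of payload per content frame (hence <<"One">> fits in one frame while <<"More than one frame">> needs several). Second, the option-parser cases pin down the behaviour of rabbit_misc:get_options/2: flag definitions default to false and become true when the switch is present, option definitions carry a default that is overridden when the switch is given, and everything else comes back as plain arguments. A sketch with invented argument values:

    {Args, Opts} = rabbit_misc:get_options(
                     [{flag, "-q"}, {option, "-p", "/"}],
                     ["list_queues", "-p", "/testhost", "name"]),
    %% Args = ["list_queues", "name"]
    %% Opts contains {"-p", "/testhost"} and {"-q", false} (order unspecified)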
+ test_cluster_management() -> %% 'cluster' and 'reset' should only work if the app is stopped @@ -533,19 +812,19 @@ ok = control_action(reset, []), lists:foreach(fun (Arg) -> - ok = control_action(cluster, Arg), + ok = control_action(force_cluster, Arg), ok end, ClusteringSequence), lists:foreach(fun (Arg) -> ok = control_action(reset, []), - ok = control_action(cluster, Arg), + ok = control_action(force_cluster, Arg), ok end, ClusteringSequence), ok = control_action(reset, []), lists:foreach(fun (Arg) -> - ok = control_action(cluster, Arg), + ok = control_action(force_cluster, Arg), ok = control_action(start_app, []), ok = control_action(stop_app, []), ok @@ -553,7 +832,7 @@ ClusteringSequence), lists:foreach(fun (Arg) -> ok = control_action(reset, []), - ok = control_action(cluster, Arg), + ok = control_action(force_cluster, Arg), ok = control_action(start_app, []), ok = control_action(stop_app, []), ok @@ -564,13 +843,13 @@ ok = control_action(reset, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), - ok = control_action(cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% join a non-existing cluster as a ram node ok = control_action(reset, []), - ok = control_action(cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), SecondaryNode = rabbit_misc:makenode("hare"), case net_adm:ping(SecondaryNode) of @@ -595,18 +874,26 @@ %% join cluster as a ram node ok = control_action(reset, []), - ok = control_action(cluster, [SecondaryNodeS, "invalid1@invalid"]), + ok = control_action(force_cluster, [SecondaryNodeS, "invalid1@invalid"]), ok = control_action(start_app, []), ok = control_action(stop_app, []), %% change cluster config while remaining in same cluster - ok = control_action(cluster, ["invalid2@invalid", SecondaryNodeS]), + ok = control_action(force_cluster, ["invalid2@invalid", SecondaryNodeS]), ok = control_action(start_app, []), ok = control_action(stop_app, []), %% join non-existing cluster as a ram node - ok = control_action(cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), + ok = control_action(start_app, []), + ok = control_action(stop_app, []), + + %% join empty cluster as a ram node + ok = control_action(cluster, []), + ok = control_action(start_app, []), + ok = control_action(stop_app, []), + %% turn ram node into disk node ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS, NodeS]), @@ -614,8 +901,8 @@ ok = control_action(stop_app, []), %% convert a disk node into a ram node - ok = control_action(cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% turn a disk node into a ram node ok = control_action(reset, []), @@ -624,8 +911,12 @@ ok = control_action(stop_app, []), %% NB: this will log an inconsistent_database error, which is harmless + %% Turning cover on / off is OK even if we're not in general using cover, + %% it just turns the engine on / off, doesn't actually log anything. 
+ cover:stop([SecondaryNode]), true = disconnect_node(SecondaryNode), pong = net_adm:ping(SecondaryNode), + cover:start([SecondaryNode]), %% leaving a cluster as a ram node ok = control_action(reset, []), @@ -638,7 +929,7 @@ %% attempt to leave cluster when no other node is alive ok = control_action(cluster, [SecondaryNodeS, NodeS]), ok = control_action(start_app, []), - ok = control_action(stop_app, SecondaryNode, []), + ok = control_action(stop_app, SecondaryNode, [], []), ok = control_action(stop_app, []), {error, {no_running_cluster_nodes, _, _}} = control_action(reset, []), @@ -646,9 +937,9 @@ %% leave system clustered, with the secondary node as a ram node ok = control_action(force_reset, []), ok = control_action(start_app, []), - ok = control_action(force_reset, SecondaryNode, []), - ok = control_action(cluster, SecondaryNode, [NodeS]), - ok = control_action(start_app, SecondaryNode, []), + ok = control_action(force_reset, SecondaryNode, [], []), + ok = control_action(cluster, SecondaryNode, [NodeS], []), + ok = control_action(start_app, SecondaryNode, [], []), passed. @@ -668,7 +959,7 @@ {error, {no_such_user, _}} = control_action(list_user_permissions, ["foo"]), {error, {no_such_vhost, _}} = - control_action(list_permissions, ["-p", "/testhost"]), + control_action(list_permissions, [], [{"-p", "/testhost"}]), {error, {invalid_regexp, _, _}} = control_action(set_permissions, ["guest", "+foo", ".*", ".*"]), @@ -677,6 +968,8 @@ {error, {user_already_exists, _}} = control_action(add_user, ["foo", "bar"]), ok = control_action(change_password, ["foo", "baz"]), + ok = control_action(set_admin, ["foo"]), + ok = control_action(clear_admin, ["foo"]), ok = control_action(list_users, []), %% vhost creation @@ -686,16 +979,19 @@ ok = control_action(list_vhosts, []), %% user/vhost mapping - ok = control_action(set_permissions, ["-p", "/testhost", - "foo", ".*", ".*", ".*"]), - ok = control_action(set_permissions, ["-p", "/testhost", - "foo", ".*", ".*", ".*"]), - ok = control_action(list_permissions, ["-p", "/testhost"]), + ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"], + [{"-p", "/testhost"}]), + ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"], + [{"-p", "/testhost"}]), + ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"], + [{"-p", "/testhost"}]), + ok = control_action(list_permissions, [], [{"-p", "/testhost"}]), + ok = control_action(list_permissions, [], [{"-p", "/testhost"}]), ok = control_action(list_user_permissions, ["foo"]), %% user/vhost unmapping - ok = control_action(clear_permissions, ["-p", "/testhost", "foo"]), - ok = control_action(clear_permissions, ["-p", "/testhost", "foo"]), + ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]), + ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]), %% vhost deletion ok = control_action(delete_vhost, ["/testhost"]), @@ -704,8 +1000,8 @@ %% deleting a populated vhost ok = control_action(add_vhost, ["/testhost"]), - ok = control_action(set_permissions, ["-p", "/testhost", - "foo", ".*", ".*", ".*"]), + ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"], + [{"-p", "/testhost"}]), ok = control_action(delete_vhost, ["/testhost"]), %% user deletion @@ -716,17 +1012,18 @@ passed. 
test_server_status() -> - %% create a few things so there is some useful information to list Writer = spawn(fun () -> receive shutdown -> ok end end), - Ch = rabbit_channel:start_link(1, self(), Writer, <<"user">>, <<"/">>), - [Q, Q2] = [#amqqueue{} = rabbit_amqqueue:declare( + {ok, Ch} = rabbit_channel:start_link(1, self(), Writer, + user(<<"user">>), <<"/">>, self(), + fun (_) -> {ok, self()} end), + [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], + {new, Queue = #amqqueue{}} <- + [rabbit_amqqueue:declare( rabbit_misc:r(<<"/">>, queue, Name), - false, false, []) || - Name <- [<<"foo">>, <<"bar">>]], + false, false, [], none)]], - ok = rabbit_amqqueue:claim_queue(Q, self()), - ok = rabbit_amqqueue:basic_consume(Q, true, self(), Ch, undefined, + ok = rabbit_amqqueue:basic_consume(Q, true, Ch, undefined, <<"ctag">>, true, undefined), %% list queues @@ -736,7 +1033,15 @@ ok = info_action(list_exchanges, rabbit_exchange:info_keys(), true), %% list bindings - ok = control_action(list_bindings, []), + ok = info_action(list_bindings, rabbit_binding:info_keys(), true), + %% misc binding listing APIs + [_|_] = rabbit_binding:list_for_source( + rabbit_misc:r(<<"/">>, exchange, <<"">>)), + [_] = rabbit_binding:list_for_destination( + rabbit_misc:r(<<"/">>, queue, <<"foo">>)), + [_] = rabbit_binding:list_for_source_and_destination( + rabbit_misc:r(<<"/">>, exchange, <<"">>), + rabbit_misc:r(<<"/">>, queue, <<"foo">>)), %% list connections [#listener{host = H, port = P} | _] = @@ -760,63 +1065,231 @@ %% cleanup [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]], + + unlink(Ch), ok = rabbit_channel:shutdown(Ch), passed. -test_hooks() -> - %% Firing of hooks calls all hooks in an isolated manner - rabbit_hooks:subscribe(test_hook, test, {rabbit_tests, handle_hook, []}), - rabbit_hooks:subscribe(test_hook, test2, {rabbit_tests, handle_hook, []}), - rabbit_hooks:subscribe(test_hook2, test2, {rabbit_tests, handle_hook, []}), - rabbit_hooks:trigger(test_hook, [arg1, arg2]), - [arg1, arg2] = get(test_hook_test_fired), - [arg1, arg2] = get(test_hook_test2_fired), - undefined = get(test_hook2_test2_fired), - - %% Hook Deletion works - put(test_hook_test_fired, undefined), - put(test_hook_test2_fired, undefined), - rabbit_hooks:unsubscribe(test_hook, test), - rabbit_hooks:trigger(test_hook, [arg3, arg4]), - undefined = get(test_hook_test_fired), - [arg3, arg4] = get(test_hook_test2_fired), - undefined = get(test_hook2_test2_fired), - - %% Catches exceptions from bad hooks - rabbit_hooks:subscribe(test_hook3, test, {rabbit_tests, bad_handle_hook, []}), - ok = rabbit_hooks:trigger(test_hook3, []), - - %% Passing extra arguments to hooks - rabbit_hooks:subscribe(arg_hook, test, {rabbit_tests, extra_arg_hook, [1, 3]}), - rabbit_hooks:trigger(arg_hook, [arg1, arg2]), - {[arg1, arg2], 1, 3} = get(arg_hook_test_fired), - - %% Invoking Pids - Remote = fun() -> - receive - {rabbitmq_hook,[remote_test,test,[],Target]} -> - Target ! invoked - end +test_spawn(Receiver) -> + Me = self(), + Writer = spawn(fun () -> Receiver(Me) end), + {ok, Ch} = rabbit_channel:start_link(1, Me, Writer, + user(<<"guest">>), <<"/">>, self(), + fun (_) -> {ok, self()} end), + ok = rabbit_channel:do(Ch, #'channel.open'{}), + receive #'channel.open_ok'{} -> ok + after 1000 -> throw(failed_to_receive_channel_open_ok) end, - P = spawn(Remote), - rabbit_hooks:subscribe(remote_test, test, {rabbit_hooks, notify_remote, [P, [self()]]}), - rabbit_hooks:trigger(remote_test, []), + {Writer, Ch}. 
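The statistics tests further down enable fine-grained stats (collect_statistics = fine) and capture the resulting channel_stats events by registering rabbit_tests_event_receiver, a small gen_event handler added later in this diff that simply forwards every rabbit_event notification to the test process; test_spawn/1 above provides the channel they drive.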
+ +user(Username) -> + #user{username = Username, + is_admin = true, + auth_backend = rabbit_auth_backend_internal, + impl = #internal_user{username = Username, + is_admin = true}}. + +test_statistics_receiver(Pid) -> receive - invoked -> ok - after 100 -> - io:format("Remote hook not invoked"), - throw(timeout) - end, + shutdown -> + ok; + {send_command, Method} -> + Pid ! Method, + test_statistics_receiver(Pid) + end. + +test_statistics_event_receiver(Pid) -> + receive + Foo -> + Pid ! Foo, + test_statistics_event_receiver(Pid) + end. + +test_statistics_receive_event(Ch, Matcher) -> + rabbit_channel:flush(Ch), + rabbit_channel:emit_stats(Ch), + test_statistics_receive_event1(Ch, Matcher). + +test_statistics_receive_event1(Ch, Matcher) -> + receive #event{type = channel_stats, props = Props} -> + case Matcher(Props) of + true -> Props; + _ -> test_statistics_receive_event1(Ch, Matcher) + end + after 1000 -> throw(failed_to_receive_event) + end. + +test_statistics() -> + application:set_env(rabbit, collect_statistics, fine), + + %% ATM this just tests the queue / exchange stats in channels. That's + %% by far the most complex code though. + + %% Set up a channel and queue + {_Writer, Ch} = test_spawn(fun test_statistics_receiver/1), + rabbit_channel:do(Ch, #'queue.declare'{}), + QName = receive #'queue.declare_ok'{queue = Q0} -> + Q0 + after 1000 -> throw(failed_to_receive_queue_declare_ok) + end, + {ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName)), + QPid = Q#amqqueue.pid, + X = rabbit_misc:r(<<"/">>, exchange, <<"">>), + + rabbit_tests_event_receiver:start(self()), + + %% Check stats empty + Event = test_statistics_receive_event(Ch, fun (_) -> true end), + [] = proplists:get_value(channel_queue_stats, Event), + [] = proplists:get_value(channel_exchange_stats, Event), + [] = proplists:get_value(channel_queue_exchange_stats, Event), + + %% Publish and get a message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>, + routing_key = QName}, + rabbit_basic:build_content(#'P_basic'{}, <<"">>)), + rabbit_channel:do(Ch, #'basic.get'{queue = QName}), + + %% Check the stats reflect that + Event2 = test_statistics_receive_event( + Ch, + fun (E) -> + length(proplists:get_value( + channel_queue_exchange_stats, E)) > 0 + end), + [{QPid,[{get,1}]}] = proplists:get_value(channel_queue_stats, Event2), + [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2), + [{{QPid,X},[{publish,1}]}] = + proplists:get_value(channel_queue_exchange_stats, Event2), + + %% Check the stats remove stuff on queue deletion + rabbit_channel:do(Ch, #'queue.delete'{queue = QName}), + Event3 = test_statistics_receive_event( + Ch, + fun (E) -> + length(proplists:get_value( + channel_queue_exchange_stats, E)) == 0 + end), + + [] = proplists:get_value(channel_queue_stats, Event3), + [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3), + [] = proplists:get_value(channel_queue_exchange_stats, Event3), + + rabbit_channel:shutdown(Ch), + rabbit_tests_event_receiver:stop(), + passed. + +test_delegates_async(SecondaryNode) -> + Self = self(), + Sender = fun (Pid) -> Pid ! {invoked, Self} end, + + Responder = make_responder(fun ({invoked, Pid}) -> Pid ! 
response end), + + ok = delegate:invoke_no_result(spawn(Responder), Sender), + ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender), + await_response(2), + + LocalPids = spawn_responders(node(), Responder, 10), + RemotePids = spawn_responders(SecondaryNode, Responder, 10), + ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender), + await_response(20), + + passed. + +make_responder(FMsg) -> make_responder(FMsg, timeout). +make_responder(FMsg, Throw) -> + fun () -> + receive Msg -> FMsg(Msg) + after 1000 -> throw(Throw) + end + end. + +spawn_responders(Node, Responder, Count) -> + [spawn(Node, Responder) || _ <- lists:seq(1, Count)]. + +await_response(0) -> + ok; +await_response(Count) -> + receive + response -> ok, + await_response(Count - 1) + after 1000 -> + io:format("Async reply not received~n"), + throw(timeout) + end. + +must_exit(Fun) -> + try + Fun(), + throw(exit_not_thrown) + catch + exit:_ -> ok + end. + +test_delegates_sync(SecondaryNode) -> + Sender = fun (Pid) -> gen_server:call(Pid, invoked) end, + BadSender = fun (_Pid) -> exit(exception) end, + + Responder = make_responder(fun ({'$gen_call', From, invoked}) -> + gen_server:reply(From, response) + end), + + BadResponder = make_responder(fun ({'$gen_call', From, invoked}) -> + gen_server:reply(From, response) + end, bad_responder_died), + + response = delegate:invoke(spawn(Responder), Sender), + response = delegate:invoke(spawn(SecondaryNode, Responder), Sender), + + must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end), + must_exit(fun () -> + delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), + + LocalGoodPids = spawn_responders(node(), Responder, 2), + RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2), + LocalBadPids = spawn_responders(node(), BadResponder, 2), + RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2), + + {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender), + true = lists:all(fun ({_, response}) -> true end, GoodRes), + GoodResPids = [Pid || {Pid, _} <- GoodRes], + + Good = lists:usort(LocalGoodPids ++ RemoteGoodPids), + Good = lists:usort(GoodResPids), + + {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender), + true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes), + BadResPids = [Pid || {Pid, _} <- BadRes], + + Bad = lists:usort(LocalBadPids ++ RemoteBadPids), + Bad = lists:usort(BadResPids), + + MagicalPids = [rabbit_misc:string_to_pid(Str) || + Str <- ["", ""]], + {[], BadNodes} = delegate:invoke(MagicalPids, Sender), + true = lists:all( + fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end, + BadNodes), + BadNodesPids = [Pid || {Pid, _} <- BadNodes], + + Magical = lists:usort(MagicalPids), + Magical = lists:usort(BadNodesPids), + passed. %--------------------------------------------------------------------- -control_action(Command, Args) -> control_action(Command, node(), Args). +control_action(Command, Args) -> + control_action(Command, node(), Args, default_options()). + +control_action(Command, Args, NewOpts) -> + control_action(Command, node(), Args, + expand_options(default_options(), NewOpts)). 
-control_action(Command, Node, Args) -> +control_action(Command, Node, Args, Opts) -> case catch rabbit_control:action( - Command, Node, Args, + Command, Node, Args, Opts, fun (Format, Args1) -> io:format(Format ++ " ...~n", Args1) end) of @@ -830,13 +1303,28 @@ info_action(Command, Args, CheckVHost) -> ok = control_action(Command, []), - if CheckVHost -> ok = control_action(Command, ["-p", "/"]); + if CheckVHost -> ok = control_action(Command, []); true -> ok end, ok = control_action(Command, lists:map(fun atom_to_list/1, Args)), {bad_argument, dummy} = control_action(Command, ["dummy"]), ok. +default_options() -> [{"-p", "/"}, {"-q", "false"}]. + +expand_options(As, Bs) -> + lists:foldl(fun({K, _}=A, R) -> + case proplists:is_defined(K, R) of + true -> R; + false -> [A | R] + end + end, Bs, As). + +check_get_options({ExpArgs, ExpOpts}, Defs, Args) -> + {ExpArgs, ResOpts} = rabbit_misc:get_options(Defs, Args), + true = lists:sort(ExpOpts) == lists:sort(ResOpts), % don't care about the order + ok. + empty_files(Files) -> [case file:read_file_info(File) of {ok, FInfo} -> FInfo#file_info.size == 0; @@ -894,10 +1382,812 @@ Handler <- Handlers], ok. -handle_hook(HookName, Handler, Args) -> - A = atom_to_list(HookName) ++ "_" ++ atom_to_list(Handler) ++ "_fired", - put(list_to_atom(A), Args). -bad_handle_hook(_, _, _) -> - bad:bad(). -extra_arg_hook(Hookname, Handler, Args, Extra1, Extra2) -> - handle_hook(Hookname, Handler, {Args, Extra1, Extra2}). +test_supervisor_delayed_restart() -> + test_sup:test_supervisor_delayed_restart(). + +test_file_handle_cache() -> + %% test copying when there is just one spare handle + Limit = file_handle_cache:get_limit(), + ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores + TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), + ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), + Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( + filename:join(TmpDir, "file3"), + [write], []), + receive close -> ok end, + file_handle_cache:delete(Hdl) + end), + Src = filename:join(TmpDir, "file1"), + Dst = filename:join(TmpDir, "file2"), + Content = <<"foo">>, + ok = file:write_file(Src, Content), + {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), + {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), + Size = size(Content), + {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), + ok = file_handle_cache:delete(SrcHdl), + file_handle_cache:delete(DstHdl), + Pid ! close, + ok = file_handle_cache:set_limit(Limit), + passed. + +test_backing_queue() -> + case application:get_env(rabbit, backing_queue_module) of + {ok, rabbit_variable_queue} -> + {ok, FileSizeLimit} = + application:get_env(rabbit, msg_store_file_size_limit), + application:set_env(rabbit, msg_store_file_size_limit, 512, + infinity), + {ok, MaxJournal} = + application:get_env(rabbit, queue_index_max_journal_entries), + application:set_env(rabbit, queue_index_max_journal_entries, 128, + infinity), + passed = test_msg_store(), + application:set_env(rabbit, msg_store_file_size_limit, + FileSizeLimit, infinity), + passed = test_queue_index(), + passed = test_queue_index_props(), + passed = test_variable_queue(), + passed = test_variable_queue_delete_msg_store_files_callback(), + passed = test_queue_recover(), + application:set_env(rabbit, queue_index_max_journal_entries, + MaxJournal, infinity), + passed; + _ -> + passed + end. 
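test_backing_queue/0 above deliberately shrinks msg_store_file_size_limit to 512 bytes and queue_index_max_journal_entries to 128 before running the storage tests, restoring the originals afterwards: with tiny store files the bulk-publish phases below roll over many files and so reach the message store's GC and file-combining paths, and the reduced journal bound is presumably there so that the queue index flushes its journal into segment files much more often than the default would allow.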
+ +restart_msg_store_empty() -> + ok = rabbit_variable_queue:stop_msg_store(), + ok = rabbit_variable_queue:start_msg_store( + undefined, {fun (ok) -> finished end, ok}). + +guid_bin(X) -> + erlang:md5(term_to_binary(X)). + +msg_store_client_init(MsgStore, Ref) -> + rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined). + +msg_store_contains(Atom, Guids, MSCState) -> + Atom = lists:foldl( + fun (Guid, Atom1) when Atom1 =:= Atom -> + rabbit_msg_store:contains(Guid, MSCState) end, + Atom, Guids). + +msg_store_sync(Guids, MSCState) -> + Ref = make_ref(), + Self = self(), + ok = rabbit_msg_store:sync(Guids, fun () -> Self ! {sync, Ref} end, + MSCState), + receive + {sync, Ref} -> ok + after + 10000 -> + io:format("Sync from msg_store missing for guids ~p~n", [Guids]), + throw(timeout) + end. + +msg_store_read(Guids, MSCState) -> + lists:foldl(fun (Guid, MSCStateM) -> + {{ok, Guid}, MSCStateN} = rabbit_msg_store:read( + Guid, MSCStateM), + MSCStateN + end, MSCState, Guids). + +msg_store_write(Guids, MSCState) -> + ok = lists:foldl( + fun (Guid, ok) -> rabbit_msg_store:write(Guid, Guid, MSCState) end, + ok, Guids). + +msg_store_remove(Guids, MSCState) -> + rabbit_msg_store:remove(Guids, MSCState). + +msg_store_remove(MsgStore, Ref, Guids) -> + with_msg_store_client(MsgStore, Ref, + fun (MSCStateM) -> + ok = msg_store_remove(Guids, MSCStateM), + MSCStateM + end). + +with_msg_store_client(MsgStore, Ref, Fun) -> + rabbit_msg_store:client_terminate( + Fun(msg_store_client_init(MsgStore, Ref))). + +foreach_with_msg_store_client(MsgStore, Ref, Fun, L) -> + rabbit_msg_store:client_terminate( + lists:foldl(fun (Guid, MSCState) -> Fun(Guid, MSCState) end, + msg_store_client_init(MsgStore, Ref), L)). + +test_msg_store() -> + restart_msg_store_empty(), + Self = self(), + Guids = [guid_bin(M) || M <- lists:seq(1,100)], + {Guids1stHalf, Guids2ndHalf} = lists:split(50, Guids), + Ref = rabbit_guid:guid(), + MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + %% check we don't contain any of the msgs we're about to publish + false = msg_store_contains(false, Guids, MSCState), + %% publish the first half + ok = msg_store_write(Guids1stHalf, MSCState), + %% sync on the first half + ok = msg_store_sync(Guids1stHalf, MSCState), + %% publish the second half + ok = msg_store_write(Guids2ndHalf, MSCState), + %% sync on the first half again - the msg_store will be dirty, but + %% we won't need the fsync + ok = msg_store_sync(Guids1stHalf, MSCState), + %% check they're all in there + true = msg_store_contains(true, Guids, MSCState), + %% publish the latter half twice so we hit the caching and ref count code + ok = msg_store_write(Guids2ndHalf, MSCState), + %% check they're still all in there + true = msg_store_contains(true, Guids, MSCState), + %% sync on the 2nd half, but do lots of individual syncs to try + %% and cause coalescing to happen + ok = lists:foldl( + fun (Guid, ok) -> rabbit_msg_store:sync( + [Guid], fun () -> Self ! 
{sync, Guid} end, + MSCState) + end, ok, Guids2ndHalf), + lists:foldl( + fun(Guid, ok) -> + receive + {sync, Guid} -> ok + after + 10000 -> + io:format("Sync from msg_store missing (guid: ~p)~n", + [Guid]), + throw(timeout) + end + end, ok, Guids2ndHalf), + %% it's very likely we're not dirty here, so the 1st half sync + %% should hit a different code path + ok = msg_store_sync(Guids1stHalf, MSCState), + %% read them all + MSCState1 = msg_store_read(Guids, MSCState), + %% read them all again - this will hit the cache, not disk + MSCState2 = msg_store_read(Guids, MSCState1), + %% remove them all + ok = rabbit_msg_store:remove(Guids, MSCState2), + %% check first half doesn't exist + false = msg_store_contains(false, Guids1stHalf, MSCState2), + %% check second half does exist + true = msg_store_contains(true, Guids2ndHalf, MSCState2), + %% read the second half again + MSCState3 = msg_store_read(Guids2ndHalf, MSCState2), + %% release the second half, just for fun (aka code coverage) + ok = rabbit_msg_store:release(Guids2ndHalf, MSCState3), + %% read the second half again, just for fun (aka code coverage) + MSCState4 = msg_store_read(Guids2ndHalf, MSCState3), + ok = rabbit_msg_store:client_terminate(MSCState4), + %% stop and restart, preserving every other msg in 2nd half + ok = rabbit_variable_queue:stop_msg_store(), + ok = rabbit_variable_queue:start_msg_store( + [], {fun ([]) -> finished; + ([Guid|GuidsTail]) + when length(GuidsTail) rem 2 == 0 -> + {Guid, 1, GuidsTail}; + ([Guid|GuidsTail]) -> + {Guid, 0, GuidsTail} + end, Guids2ndHalf}), + MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + %% check we have the right msgs left + lists:foldl( + fun (Guid, Bool) -> + not(Bool = rabbit_msg_store:contains(Guid, MSCState5)) + end, false, Guids2ndHalf), + ok = rabbit_msg_store:client_terminate(MSCState5), + %% restart empty + restart_msg_store_empty(), + MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + %% check we don't contain any of the msgs + false = msg_store_contains(false, Guids, MSCState6), + %% publish the first half again + ok = msg_store_write(Guids1stHalf, MSCState6), + %% this should force some sort of sync internally otherwise misread + ok = rabbit_msg_store:client_terminate( + msg_store_read(Guids1stHalf, MSCState6)), + MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + ok = rabbit_msg_store:remove(Guids1stHalf, MSCState7), + ok = rabbit_msg_store:client_terminate(MSCState7), + %% restart empty + restart_msg_store_empty(), %% now safe to reuse guids + %% push a lot of msgs in... at least 100 files worth + {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit), + PayloadSizeBits = 65536, + BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)), + GuidsBig = [guid_bin(X) || X <- lists:seq(1, BigCount)], + Payload = << 0:PayloadSizeBits >>, + ok = with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (MSCStateM) -> + [ok = rabbit_msg_store:write(Guid, Payload, MSCStateM) || + Guid <- GuidsBig], + MSCStateM + end), + %% now read them to ensure we hit the fast client-side reading + ok = foreach_with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (Guid, MSCStateM) -> + {{ok, Payload}, MSCStateN} = rabbit_msg_store:read( + Guid, MSCStateM), + MSCStateN + end, GuidsBig), + %% .., then 3s by 1... + ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [guid_bin(X) || X <- lists:seq(BigCount, 1, -3)]), + %% .., then remove 3s by 2, from the young end first. 
This hits + %% GC (under 50% good data left, but no empty files. Must GC). + ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [guid_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), + %% .., then remove 3s by 3, from the young end first. This hits + %% GC... + ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [guid_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), + %% ensure empty + ok = with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (MSCStateM) -> + false = msg_store_contains(false, GuidsBig, MSCStateM), + MSCStateM + end), + %% restart empty + restart_msg_store_empty(), + passed. + +queue_name(Name) -> + rabbit_misc:r(<<"/">>, queue, Name). + +test_queue() -> + queue_name(<<"test">>). + +init_test_queue() -> + TestQueue = test_queue(), + Terms = rabbit_queue_index:shutdown_terms(TestQueue), + PRef = proplists:get_value(persistent_ref, Terms, rabbit_guid:guid()), + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef), + Res = rabbit_queue_index:recover( + TestQueue, Terms, false, + fun (Guid) -> + rabbit_msg_store:contains(Guid, PersistentClient) + end, + fun nop/1), + ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient), + Res. + +restart_test_queue(Qi) -> + _ = rabbit_queue_index:terminate([], Qi), + ok = rabbit_variable_queue:stop(), + ok = rabbit_variable_queue:start([test_queue()]), + init_test_queue(). + +empty_test_queue() -> + ok = rabbit_variable_queue:stop(), + ok = rabbit_variable_queue:start([]), + {0, Qi} = init_test_queue(), + _ = rabbit_queue_index:delete_and_terminate(Qi), + ok. + +with_empty_test_queue(Fun) -> + ok = empty_test_queue(), + {0, Qi} = init_test_queue(), + rabbit_queue_index:delete_and_terminate(Fun(Qi)). + +queue_index_publish(SeqIds, Persistent, Qi) -> + Ref = rabbit_guid:guid(), + MsgStore = case Persistent of + true -> ?PERSISTENT_MSG_STORE; + false -> ?TRANSIENT_MSG_STORE + end, + MSCState = msg_store_client_init(MsgStore, Ref), + {A, B = [{_SeqId, LastGuidWritten} | _]} = + lists:foldl( + fun (SeqId, {QiN, SeqIdsGuidsAcc}) -> + Guid = rabbit_guid:guid(), + QiM = rabbit_queue_index:publish( + Guid, SeqId, #message_properties{}, Persistent, QiN), + ok = rabbit_msg_store:write(Guid, Guid, MSCState), + {QiM, [{SeqId, Guid} | SeqIdsGuidsAcc]} + end, {Qi, []}, SeqIds), + %% do this just to force all of the publishes through to the msg_store: + true = rabbit_msg_store:contains(LastGuidWritten, MSCState), + ok = rabbit_msg_store:client_delete_and_terminate(MSCState), + {A, B}. + +verify_read_with_published(_Delivered, _Persistent, [], _) -> + ok; +verify_read_with_published(Delivered, Persistent, + [{Guid, SeqId, _Props, Persistent, Delivered}|Read], + [{SeqId, Guid}|Published]) -> + verify_read_with_published(Delivered, Persistent, Read, Published); +verify_read_with_published(_Delivered, _Persistent, _Read, _Published) -> + ko. + +test_queue_index_props() -> + with_empty_test_queue( + fun(Qi0) -> + Guid = rabbit_guid:guid(), + Props = #message_properties{expiry=12345}, + Qi1 = rabbit_queue_index:publish(Guid, 1, Props, true, Qi0), + {[{Guid, 1, Props, _, _}], Qi2} = + rabbit_queue_index:read(1, 2, Qi1), + Qi2 + end), + + ok = rabbit_variable_queue:stop(), + ok = rabbit_variable_queue:start([]), + + passed. 
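test_queue_index/0 below walks the publish/deliver/ack/flush lifecycle across segment boundaries. Reduced to a single message, the cycle it repeats looks like this (helper and calls exactly as used in these tests, shown only for orientation):

    {Qi1, [{SeqId, _Guid}]} = queue_index_publish([0], false, Qi0),
    Qi2 = rabbit_queue_index:deliver([SeqId], Qi1),
    Qi3 = rabbit_queue_index:ack([SeqId], Qi2),
    _Qi4 = rabbit_queue_index:flush(Qi3),
    %% once #pubs == #acks a segment holds no live entries, which is what
    %% the bounds/1 and restart assertions below check.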
+ +test_queue_index() -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + TwoSegs = SegmentSize + SegmentSize, + MostOfASegment = trunc(SegmentSize*0.75), + SeqIdsA = lists:seq(0, MostOfASegment-1), + SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment), + SeqIdsC = lists:seq(0, trunc(SegmentSize/2)), + SeqIdsD = lists:seq(0, SegmentSize*4), + + with_empty_test_queue( + fun (Qi0) -> + {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0), + {Qi2, SeqIdsGuidsA} = queue_index_publish(SeqIdsA, false, Qi1), + {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2), + {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3), + ok = verify_read_with_published(false, false, ReadA, + lists:reverse(SeqIdsGuidsA)), + %% should get length back as 0, as all the msgs were transient + {0, Qi6} = restart_test_queue(Qi4), + {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6), + {Qi8, SeqIdsGuidsB} = queue_index_publish(SeqIdsB, true, Qi7), + {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8), + {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9), + ok = verify_read_with_published(false, true, ReadB, + lists:reverse(SeqIdsGuidsB)), + %% should get length back as MostOfASegment + LenB = length(SeqIdsB), + {LenB, Qi12} = restart_test_queue(Qi10), + {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12), + Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13), + {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14), + ok = verify_read_with_published(true, true, ReadC, + lists:reverse(SeqIdsGuidsB)), + Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15), + Qi17 = rabbit_queue_index:flush(Qi16), + %% Everything will have gone now because #pubs == #acks + {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17), + %% should get length back as 0 because all persistent + %% msgs have been acked + {0, Qi19} = restart_test_queue(Qi18), + Qi19 + end), + + %% These next bits are just to hit the auto deletion of segment files. + %% First, partials: + %% a) partial pub+del+ack, then move to new segment + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsGuidsC} = queue_index_publish(SeqIdsC, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), + Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2), + Qi4 = rabbit_queue_index:flush(Qi3), + {Qi5, _SeqIdsGuidsC1} = queue_index_publish([SegmentSize], + false, Qi4), + Qi5 + end), + + %% b) partial pub+del, then move to new segment, then ack all in old segment + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsGuidsC2} = queue_index_publish(SeqIdsC, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), + {Qi3, _SeqIdsGuidsC3} = queue_index_publish([SegmentSize], + false, Qi2), + Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3), + rabbit_queue_index:flush(Qi4) + end), + + %% c) just fill up several segments of all pubs, then +dels, then +acks + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsGuidsD} = queue_index_publish(SeqIdsD, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), + Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), + rabbit_queue_index:flush(Qi3) + end), + + %% d) get messages in all states to a segment, then flush, then do + %% the same again, don't flush and read. This will hit all + %% possibilities in combining the segment with the journal. 
+ with_empty_test_queue( + fun (Qi0) -> + {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7], + false, Qi0), + Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), + Qi3 = rabbit_queue_index:ack([0], Qi2), + Qi4 = rabbit_queue_index:flush(Qi3), + {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4), + Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), + Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), + {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7), + {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8), + ok = verify_read_with_published(true, false, ReadD, + [Four, Five, Six]), + {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9), + ok = verify_read_with_published(false, false, ReadE, + [Seven, Eight]), + Qi10 + end), + + %% e) as for (d), but use terminate instead of read, which will + %% exercise journal_minus_segment, not segment_plus_journal. + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsGuidsE} = queue_index_publish([0,1,2,4,5,7], + true, Qi0), + Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), + Qi3 = rabbit_queue_index:ack([0], Qi2), + {5, Qi4} = restart_test_queue(Qi3), + {Qi5, _SeqIdsGuidsF} = queue_index_publish([3,6,8], true, Qi4), + Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), + Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), + {5, Qi8} = restart_test_queue(Qi7), + Qi8 + end), + + ok = rabbit_variable_queue:stop(), + ok = rabbit_variable_queue:start([]), + + passed. + +variable_queue_publish(IsPersistent, Count, VQ) -> + lists:foldl( + fun (_N, VQN) -> + rabbit_variable_queue:publish( + rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{delivery_mode = case IsPersistent of + true -> 2; + false -> 1 + end}, <<>>), + #message_properties{}, VQN) + end, VQ, lists:seq(1, Count)). + +variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> + lists:foldl(fun (N, {VQN, AckTagsAcc}) -> + Rem = Len - N, + {{#basic_message { is_persistent = IsPersistent }, + IsDelivered, AckTagN, Rem}, VQM} = + rabbit_variable_queue:fetch(true, VQN), + {VQM, [AckTagN | AckTagsAcc]} + end, {VQ, []}, lists:seq(1, Count)). + +assert_prop(List, Prop, Value) -> + Value = proplists:get_value(Prop, List). + +assert_props(List, PropVals) -> + [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals]. + +with_fresh_variable_queue(Fun) -> + ok = empty_test_queue(), + VQ = rabbit_variable_queue:init(test_queue(), true, false, + fun nop/2, fun nop/1), + S0 = rabbit_variable_queue:status(VQ), + assert_props(S0, [{q1, 0}, {q2, 0}, + {delta, {delta, undefined, 0, undefined}}, + {q3, 0}, {q4, 0}, + {len, 0}]), + _ = rabbit_variable_queue:delete_and_terminate(Fun(VQ)), + passed. + +test_variable_queue() -> + [passed = with_fresh_variable_queue(F) || + F <- [fun test_variable_queue_dynamic_duration_change/1, + fun test_variable_queue_partial_segments_delta_thing/1, + fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1, + fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1, + fun test_dropwhile/1, + fun test_variable_queue_ack_limiting/1]], + passed. 
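Broadly, for reading the variable-queue tests below, which assert on rabbit_variable_queue:status/1 counters: q1 and q4 hold messages kept entirely in RAM, q2 and q3 hold messages whose index entries are still in RAM but whose content may already be on disk, and delta counts messages held only on disk; ram_msg_count and ram_ack_count track RAM-resident message bodies and pending acks. Calling set_ram_duration_target(0, VQ) asks the queue to push everything towards disk, which is why check_variable_queue_status/2 (further down) waits for the queue to finish "shuffling" via needs_idle_timeout/idle_timeout before asserting.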
+ +test_variable_queue_ack_limiting(VQ0) -> + %% start by sending in a bunch of messages + Len = 1024, + VQ1 = variable_queue_publish(false, Len, VQ0), + + %% squeeze and relax queue + Churn = Len div 32, + VQ2 = publish_fetch_and_ack(Churn, Len, VQ1), + + %% update stats for duration + {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2), + + %% fetch half the messages + {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3), + + VQ5 = check_variable_queue_status(VQ4, [{len , Len div 2}, + {ram_ack_count, Len div 2}, + {ram_msg_count, Len div 2}]), + + %% ensure all acks go to disk on 0 duration target + VQ6 = check_variable_queue_status( + rabbit_variable_queue:set_ram_duration_target(0, VQ5), + [{len, Len div 2}, + {target_ram_count, 0}, + {ram_msg_count, 0}, + {ram_ack_count, 0}]), + + VQ6. + +test_dropwhile(VQ0) -> + Count = 10, + + %% add messages with sequential expiry + VQ1 = lists:foldl( + fun (N, VQN) -> + rabbit_variable_queue:publish( + rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{}, <<>>), + #message_properties{expiry = N}, VQN) + end, VQ0, lists:seq(1, Count)), + + %% drop the first 5 messages + VQ2 = rabbit_variable_queue:dropwhile( + fun(#message_properties { expiry = Expiry }) -> + Expiry =< 5 + end, VQ1), + + %% fetch five now + VQ3 = lists:foldl(fun (_N, VQN) -> + {{#basic_message{}, _, _, _}, VQM} = + rabbit_variable_queue:fetch(false, VQN), + VQM + end, VQ2, lists:seq(6, Count)), + + %% should be empty now + {empty, VQ4} = rabbit_variable_queue:fetch(false, VQ3), + + VQ4. + +test_variable_queue_dynamic_duration_change(VQ0) -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + + %% start by sending in a couple of segments worth + Len = 2*SegmentSize, + VQ1 = variable_queue_publish(false, Len, VQ0), + %% squeeze and relax queue + Churn = Len div 32, + VQ2 = publish_fetch_and_ack(Churn, Len, VQ1), + + {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2), + VQ7 = lists:foldl( + fun (Duration1, VQ4) -> + {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4), + io:format("~p:~n~p~n", + [Duration1, rabbit_variable_queue:status(VQ5)]), + VQ6 = rabbit_variable_queue:set_ram_duration_target( + Duration1, VQ5), + publish_fetch_and_ack(Churn, Len, VQ6) + end, VQ3, [Duration / 4, 0, Duration / 4, infinity]), + + %% drain + {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7), + VQ9 = rabbit_variable_queue:ack(AckTags, VQ8), + {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), + + VQ10. + +publish_fetch_and_ack(0, _Len, VQ0) -> + VQ0; +publish_fetch_and_ack(N, Len, VQ0) -> + VQ1 = variable_queue_publish(false, 1, VQ0), + {{_Msg, false, AckTag, Len}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), + VQ3 = rabbit_variable_queue:ack([AckTag], VQ2), + publish_fetch_and_ack(N-1, Len, VQ3). 
+ +test_variable_queue_partial_segments_delta_thing(VQ0) -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + HalfSegment = SegmentSize div 2, + OneAndAHalfSegment = SegmentSize + HalfSegment, + VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0), + {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1), + VQ3 = check_variable_queue_status( + rabbit_variable_queue:set_ram_duration_target(0, VQ2), + %% one segment in q3 as betas, and half a segment in delta + [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}}, + {q3, SegmentSize}, + {len, SegmentSize + HalfSegment}]), + VQ4 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ3), + VQ5 = check_variable_queue_status( + variable_queue_publish(true, 1, VQ4), + %% one alpha, but it's in the same segment as the deltas + [{q1, 1}, + {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}}, + {q3, SegmentSize}, + {len, SegmentSize + HalfSegment + 1}]), + {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false, + SegmentSize + HalfSegment + 1, VQ5), + VQ7 = check_variable_queue_status( + VQ6, + %% the half segment should now be in q3 as betas + [{q1, 1}, + {delta, {delta, undefined, 0, undefined}}, + {q3, HalfSegment}, + {len, HalfSegment + 1}]), + {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false, + HalfSegment + 1, VQ7), + VQ9 = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8), + %% should be empty now + {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), + VQ10. + +check_variable_queue_status(VQ0, Props) -> + VQ1 = variable_queue_wait_for_shuffling_end(VQ0), + S = rabbit_variable_queue:status(VQ1), + io:format("~p~n", [S]), + assert_props(S, Props), + VQ1. + +variable_queue_wait_for_shuffling_end(VQ) -> + case rabbit_variable_queue:needs_idle_timeout(VQ) of + true -> variable_queue_wait_for_shuffling_end( + rabbit_variable_queue:idle_timeout(VQ)); + false -> VQ + end. + +test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> + Count = 2 * rabbit_queue_index:next_segment_boundary(0), + VQ1 = variable_queue_publish(true, Count, VQ0), + VQ2 = variable_queue_publish(false, Count, VQ1), + VQ3 = rabbit_variable_queue:set_ram_duration_target(0, VQ2), + {VQ4, _AckTags} = variable_queue_fetch(Count, true, false, + Count + Count, VQ3), + {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, + Count, VQ4), + _VQ6 = rabbit_variable_queue:terminate(VQ5), + VQ7 = rabbit_variable_queue:init(test_queue(), true, true, + fun nop/2, fun nop/1), + {{_Msg1, true, _AckTag1, Count1}, VQ8} = + rabbit_variable_queue:fetch(true, VQ7), + VQ9 = variable_queue_publish(false, 1, VQ8), + VQ10 = rabbit_variable_queue:set_ram_duration_target(0, VQ9), + {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10), + {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11), + VQ12. + +test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> + VQ1 = rabbit_variable_queue:set_ram_duration_target(0, VQ0), + VQ2 = variable_queue_publish(false, 4, VQ1), + {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), + VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), + VQ5 = rabbit_variable_queue:idle_timeout(VQ4), + _VQ6 = rabbit_variable_queue:terminate(VQ5), + VQ7 = rabbit_variable_queue:init(test_queue(), true, true, + fun nop/2, fun nop/1), + {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), + VQ8. 
+ +test_queue_recover() -> + Count = 2 * rabbit_queue_index:next_segment_boundary(0), + TxID = rabbit_guid:guid(), + {new, #amqqueue { pid = QPid, name = QName }} = + rabbit_amqqueue:declare(test_queue(), true, false, [], none), + [begin + Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{delivery_mode = 2}, <<>>), + Delivery = #delivery{mandatory = false, immediate = false, txn = TxID, + sender = self(), message = Msg}, + true = rabbit_amqqueue:deliver(QPid, Delivery) + end || _ <- lists:seq(1, Count)], + rabbit_amqqueue:commit_all([QPid], TxID, self()), + exit(QPid, kill), + MRef = erlang:monitor(process, QPid), + receive {'DOWN', MRef, process, QPid, _Info} -> ok + after 10000 -> exit(timeout_waiting_for_queue_death) + end, + rabbit_amqqueue:stop(), + ok = rabbit_amqqueue:start(), + rabbit_amqqueue:with_or_die( + QName, + fun (Q1 = #amqqueue { pid = QPid1 }) -> + CountMinusOne = Count - 1, + {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = + rabbit_amqqueue:basic_get(Q1, self(), false), + exit(QPid1, shutdown), + VQ1 = rabbit_variable_queue:init(QName, true, true, + fun nop/2, fun nop/1), + {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = + rabbit_variable_queue:fetch(true, VQ1), + _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), + rabbit_amqqueue:internal_delete(QName) + end), + passed. + +test_variable_queue_delete_msg_store_files_callback() -> + ok = restart_msg_store_empty(), + {new, #amqqueue { pid = QPid, name = QName } = Q} = + rabbit_amqqueue:declare(test_queue(), true, false, [], none), + TxID = rabbit_guid:guid(), + Payload = <<0:8388608>>, %% 1MB + Count = 30, + [begin + Msg = rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{delivery_mode = 2}, Payload), + Delivery = #delivery{mandatory = false, immediate = false, txn = TxID, + sender = self(), message = Msg}, + true = rabbit_amqqueue:deliver(QPid, Delivery) + end || _ <- lists:seq(1, Count)], + rabbit_amqqueue:commit_all([QPid], TxID, self()), + rabbit_amqqueue:set_ram_duration_target(QPid, 0), + + CountMinusOne = Count - 1, + {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} = + rabbit_amqqueue:basic_get(Q, self(), true), + {ok, CountMinusOne} = rabbit_amqqueue:purge(Q), + + %% give the queue a second to receive the close_fds callback msg + timer:sleep(1000), + + rabbit_amqqueue:delete(Q, false, false), + passed. + +test_configurable_server_properties() -> + %% List of the names of the built-in properties do we expect to find + BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>, + <<"copyright">>, <<"information">>], + + %% Verify that the built-in properties are initially present + ActualPropNames = [Key || + {Key, longstr, _} <- rabbit_reader:server_properties()], + true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end, + BuiltInPropNames), + + %% Get the initial server properties configured in the environment + {ok, ServerProperties} = application:get_env(rabbit, server_properties), + + %% Helper functions + ConsProp = fun (X) -> application:set_env(rabbit, + server_properties, + [X | ServerProperties]) end, + IsPropPresent = fun (X) -> lists:member(X, + rabbit_reader:server_properties()) + end, + + %% Add a wholly new property of the simplified {KeyAtom, StringValue} form + NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"}, + ConsProp(NewSimplifiedProperty), + %% Do we find hare soup, appropriately formatted in the generated properties? 
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)), + longstr, + list_to_binary(NewHareVal)}, + true = IsPropPresent(ExpectedHareImage), + + %% Add a wholly new property of the {BinaryKey, Type, Value} form + %% and check for it + NewProperty = {<<"new-bin-key">>, signedint, -1}, + ConsProp(NewProperty), + %% Do we find the new property? + true = IsPropPresent(NewProperty), + + %% Add a property that clobbers a built-in, and verify correct clobbering + {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."}, + {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)), + list_to_binary(NewVerVal)}, + ConsProp(NewVersion), + ClobberedServerProps = rabbit_reader:server_properties(), + %% Is the clobbering insert present? + true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}), + %% Is the clobbering insert the only thing with the clobbering key? + [{BinNewVerKey, longstr, BinNewVerVal}] = + [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey], + + application:set_env(rabbit, server_properties, ServerProperties), + passed. + +nop(_) -> ok. +nop(_, _) -> ok. diff -Nru rabbitmq-server-1.7.2/src/rabbit_tests_event_receiver.erl rabbitmq-server-2.3.1/src/rabbit_tests_event_receiver.erl --- rabbitmq-server-1.7.2/src/rabbit_tests_event_receiver.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_tests_event_receiver.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,51 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_tests_event_receiver). + +-export([start/1, stop/0]). + +-export([init/1, handle_call/2, handle_event/2, handle_info/2, + terminate/2, code_change/3]). + +start(Pid) -> + gen_event:add_handler(rabbit_event, ?MODULE, [Pid]). + +stop() -> + gen_event:delete_handler(rabbit_event, ?MODULE, []). + +%%---------------------------------------------------------------------------- + +init([Pid]) -> + {ok, Pid}. + +handle_call(_Request, Pid) -> + {ok, not_understood, Pid}. + +handle_event(Event, Pid) -> + Pid ! Event, + {ok, Pid}. + +handle_info(_Info, Pid) -> + {ok, Pid}. + +terminate(_Arg, _Pid) -> + ok. + +code_change(_OldVsn, Pid, _Extra) -> + {ok, Pid}. + +%%---------------------------------------------------------------------------- diff -Nru rabbitmq-server-1.7.2/src/rabbit_tracer.erl rabbitmq-server-2.3.1/src/rabbit_tracer.erl --- rabbitmq-server-1.7.2/src/rabbit_tracer.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_tracer.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_tracer). --export([start/0]). - --import(erlang). - -start() -> - spawn(fun mainloop/0), - ok. - -mainloop() -> - erlang:trace(new, true, [all]), - mainloop1(). - -mainloop1() -> - receive - Msg -> - rabbit_log:info("TRACE: ~p~n", [Msg]) - end, - mainloop1(). diff -Nru rabbitmq-server-1.7.2/src/rabbit_types.erl rabbitmq-server-2.3.1/src/rabbit_types.erl --- rabbitmq-server-1.7.2/src/rabbit_types.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_types.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,160 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_types). + +-include("rabbit.hrl"). + +-ifdef(use_specs). + +-export_type([txn/0, maybe/1, info/0, infos/0, info_key/0, info_keys/0, + message/0, basic_message/0, + delivery/0, content/0, decoded_content/0, undecoded_content/0, + unencoded_content/0, encoded_content/0, message_properties/0, + vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0, + binding/0, binding_source/0, binding_destination/0, + amqqueue/0, exchange/0, + connection/0, protocol/0, user/0, internal_user/0, + username/0, password/0, password_hash/0, ok/1, error/1, + ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0, channel_exit/0, + connection_exit/0]). + +-type(channel_exit() :: no_return()). +-type(connection_exit() :: no_return()). + +-type(maybe(T) :: T | 'none'). +-type(vhost() :: binary()). +-type(ctag() :: binary()). 
+ +%% TODO: make this more precise by tying specific class_ids to +%% specific properties +-type(undecoded_content() :: + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: 'none', + properties_bin :: binary(), + payload_fragments_rev :: [binary()]} | + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: 'none', + payload_fragments_rev :: [binary()]}). +-type(unencoded_content() :: undecoded_content()). +-type(decoded_content() :: + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: maybe(binary()), + payload_fragments_rev :: [binary()]}). +-type(encoded_content() :: + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: maybe(rabbit_framing:amqp_property_record()), + properties_bin :: binary(), + payload_fragments_rev :: [binary()]}). +-type(content() :: undecoded_content() | decoded_content()). +-type(basic_message() :: + #basic_message{exchange_name :: rabbit_exchange:name(), + routing_key :: rabbit_router:routing_key(), + content :: content(), + guid :: rabbit_guid:guid(), + is_persistent :: boolean()}). +-type(message() :: basic_message()). +-type(delivery() :: + #delivery{mandatory :: boolean(), + immediate :: boolean(), + txn :: maybe(txn()), + sender :: pid(), + message :: message()}). +-type(message_properties() :: + #message_properties{expiry :: pos_integer() | 'undefined', + needs_confirming :: boolean()}). + +%% this is really an abstract type, but dialyzer does not support them +-type(txn() :: rabbit_guid:guid()). + +-type(info_key() :: atom()). +-type(info_keys() :: [info_key()]). + +-type(info() :: {info_key(), any()}). +-type(infos() :: [info()]). + +-type(amqp_error() :: + #amqp_error{name :: rabbit_framing:amqp_exception(), + explanation :: string(), + method :: rabbit_framing:amqp_method_name()}). + +-type(r(Kind) :: + r2(vhost(), Kind)). +-type(r2(VirtualHost, Kind) :: + r3(VirtualHost, Kind, rabbit_misc:resource_name())). +-type(r3(VirtualHost, Kind, Name) :: + #resource{virtual_host :: VirtualHost, + kind :: Kind, + name :: Name}). + +-type(listener() :: + #listener{node :: node(), + protocol :: atom(), + host :: rabbit_networking:hostname(), + port :: rabbit_networking:ip_port()}). + +-type(binding_source() :: rabbit_exchange:name()). +-type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()). + +-type(binding() :: + #binding{source :: rabbit_exchange:name(), + destination :: binding_destination(), + key :: rabbit_binding:key(), + args :: rabbit_framing:amqp_table()}). + +-type(amqqueue() :: + #amqqueue{name :: rabbit_amqqueue:name(), + durable :: boolean(), + auto_delete :: boolean(), + exclusive_owner :: rabbit_types:maybe(pid()), + arguments :: rabbit_framing:amqp_table(), + pid :: rabbit_types:maybe(pid())}). + +-type(exchange() :: + #exchange{name :: rabbit_exchange:name(), + type :: rabbit_exchange:type(), + durable :: boolean(), + auto_delete :: boolean(), + arguments :: rabbit_framing:amqp_table()}). + +-type(connection() :: pid()). + +-type(protocol() :: rabbit_framing:protocol()). + +-type(user() :: + #user{username :: username(), + is_admin :: boolean(), + auth_backend :: atom(), + impl :: any()}). + +-type(internal_user() :: + #internal_user{username :: username(), + password_hash :: password_hash(), + is_admin :: boolean()}). + +-type(username() :: binary()). +-type(password() :: binary()). +-type(password_hash() :: binary()). 
+ +-type(ok(A) :: {'ok', A}). +-type(error(A) :: {'error', A}). +-type(ok_or_error(A) :: 'ok' | error(A)). +-type(ok_or_error2(A, B) :: ok(A) | error(B)). +-type(ok_pid_or_error() :: ok_or_error2(pid(), any())). + +-endif. % use_specs diff -Nru rabbitmq-server-1.7.2/src/rabbit_upgrade.erl rabbitmq-server-2.3.1/src/rabbit_upgrade.erl --- rabbitmq-server-1.7.2/src/rabbit_upgrade.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_upgrade.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,169 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_upgrade). + +-export([maybe_upgrade/0, read_version/0, write_version/0, desired_version/0]). + +-include("rabbit.hrl"). + +-define(VERSION_FILENAME, "schema_version"). +-define(LOCK_FILENAME, "schema_upgrade_lock"). + +%% ------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(step() :: atom()). +-type(version() :: [step()]). + +-spec(maybe_upgrade/0 :: () -> 'ok' | 'version_not_available'). +-spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). +-spec(write_version/0 :: () -> 'ok'). +-spec(desired_version/0 :: () -> version()). + +-endif. + +%% ------------------------------------------------------------------- + +%% Try to upgrade the schema. If no information on the existing schema +%% could be found, do nothing. rabbit_mnesia:check_schema_integrity() +%% will catch the problem. +maybe_upgrade() -> + case read_version() of + {ok, CurrentHeads} -> + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> case upgrades_to_apply(CurrentHeads, G) of + [] -> ok; + Upgrades -> apply_upgrades(Upgrades) + end; + Unknown -> throw({error, + {future_upgrades_found, Unknown}}) + end + end); + {error, enoent} -> + version_not_available + end. + +read_version() -> + case rabbit_misc:read_term_file(schema_filename()) of + {ok, [Heads]} -> {ok, Heads}; + {error, _} = Err -> Err + end. + +write_version() -> + ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), + ok. + +desired_version() -> + with_upgrade_graph(fun (G) -> heads(G) end). + +%% ------------------------------------------------------------------- + +with_upgrade_graph(Fun) -> + case rabbit_misc:build_acyclic_graph( + fun vertices/2, fun edges/2, + rabbit_misc:all_module_attributes(rabbit_upgrade)) of + {ok, G} -> try + Fun(G) + after + true = digraph:delete(G) + end; + {error, {vertex, duplicate, StepName}} -> + throw({error, {duplicate_upgrade_step, StepName}}); + {error, {edge, {bad_vertex, StepName}, _From, _To}} -> + throw({error, {dependency_on_unknown_upgrade_step, StepName}}); + {error, {edge, {bad_edge, StepNames}, _From, _To}} -> + throw({error, {cycle_in_upgrade_steps, StepNames}}) + end. + +vertices(Module, Steps) -> + [{StepName, {Module, StepName}} || {StepName, _Reqs} <- Steps]. 
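As a rough illustration of how this graph is put together (derived from vertices/2 above, and edges/2 and heads/1 below, using the -rabbit_upgrade attributes declared later in this patch in rabbit_upgrade_functions.erl):

    %% Declared steps: remove_user_scope, hash_passwords, add_ip_to_listener,
    %%                 internal_exchanges, user_to_internal_user [hash_passwords]
    %% Only edge:      hash_passwords -> user_to_internal_user
    %% Heads (out-degree 0, sorted), i.e. desired_version():
    %%                 [add_ip_to_listener, internal_exchanges,
    %%                  remove_user_scope, user_to_internal_user]
    %% write_version/0 stores that list as a single term in the
    %% "schema_version" file in the Mnesia directory, roughly:
    %%                 [add_ip_to_listener,internal_exchanges,
    %%                  remove_user_scope,user_to_internal_user].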
+ +edges(_Module, Steps) -> + [{Require, StepName} || {StepName, Requires} <- Steps, Require <- Requires]. + + +unknown_heads(Heads, G) -> + [H || H <- Heads, digraph:vertex(G, H) =:= false]. + +upgrades_to_apply(Heads, G) -> + %% Take all the vertices which can reach the known heads. That's + %% everything we've already applied. Subtract that from all + %% vertices: that's what we have to apply. + Unsorted = sets:to_list( + sets:subtract( + sets:from_list(digraph:vertices(G)), + sets:from_list(digraph_utils:reaching(Heads, G)))), + %% Form a subgraph from that list and find a topological ordering + %% so we can invoke them in order. + [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. + +heads(G) -> + lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). + +%% ------------------------------------------------------------------- + +apply_upgrades(Upgrades) -> + LockFile = lock_filename(dir()), + case rabbit_misc:lock_file(LockFile) of + ok -> + BackupDir = dir() ++ "-upgrade-backup", + info("Upgrades: ~w to apply~n", [length(Upgrades)]), + case rabbit_mnesia:copy_db(BackupDir) of + ok -> + %% We need to make the backup after creating the + %% lock file so that it protects us from trying to + %% overwrite the backup. Unfortunately this means + %% the lock file exists in the backup too, which + %% is not intuitive. Remove it. + ok = file:delete(lock_filename(BackupDir)), + info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + [apply_upgrade(Upgrade) || Upgrade <- Upgrades], + info("Upgrades: All upgrades applied successfully~n", []), + ok = write_version(), + ok = rabbit_misc:recursive_delete([BackupDir]), + info("Upgrades: Mnesia backup removed~n", []), + ok = file:delete(LockFile); + {error, E} -> + %% If we can't backup, the upgrade hasn't started + %% hence we don't need the lockfile since the real + %% mnesia dir is the good one. + ok = file:delete(LockFile), + throw({could_not_back_up_mnesia_dir, E}) + end; + {error, eexist} -> + throw({error, previous_upgrade_failed}) + end. + +apply_upgrade({M, F}) -> + info("Upgrades: Applying ~w:~w~n", [M, F]), + ok = apply(M, F, []). + +%% ------------------------------------------------------------------- + +dir() -> rabbit_mnesia:dir(). + +schema_filename() -> filename:join(dir(), ?VERSION_FILENAME). + +lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). + +%% NB: we cannot use rabbit_log here since it may not have been +%% started yet +info(Msg, Args) -> error_logger:info_msg(Msg, Args). diff -Nru rabbitmq-server-1.7.2/src/rabbit_upgrade_functions.erl rabbitmq-server-2.3.1/src/rabbit_upgrade_functions.erl --- rabbitmq-server-1.7.2/src/rabbit_upgrade_functions.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_upgrade_functions.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,103 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. 
All rights reserved. +%% + +-module(rabbit_upgrade_functions). + +-include("rabbit.hrl"). + +-compile([export_all]). + +-rabbit_upgrade({remove_user_scope, []}). +-rabbit_upgrade({hash_passwords, []}). +-rabbit_upgrade({add_ip_to_listener, []}). +-rabbit_upgrade({internal_exchanges, []}). +-rabbit_upgrade({user_to_internal_user, [hash_passwords]}). + +%% ------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(remove_user_scope/0 :: () -> 'ok'). +-spec(hash_passwords/0 :: () -> 'ok'). +-spec(add_ip_to_listener/0 :: () -> 'ok'). +-spec(internal_exchanges/0 :: () -> 'ok'). +-spec(user_to_internal_user/0 :: () -> 'ok'). + +-endif. + +%%-------------------------------------------------------------------- + +%% It's a bad idea to use records or record_info here, even for the +%% destination form. Because in the future, the destination form of +%% your current transform may not match the record any more, and it +%% would be messy to have to go back and fix old transforms at that +%% point. + +remove_user_scope() -> + mnesia( + rabbit_user_permission, + fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) -> + {user_permission, UV, {permission, Conf, Write, Read}} + end, + [user_vhost, permission]). + +hash_passwords() -> + mnesia( + rabbit_user, + fun ({user, Username, Password, IsAdmin}) -> + Hash = rabbit_auth_backend_internal:hash_password(Password), + {user, Username, Hash, IsAdmin} + end, + [username, password_hash, is_admin]). + +add_ip_to_listener() -> + mnesia( + rabbit_listener, + fun ({listener, Node, Protocol, Host, Port}) -> + {listener, Node, Protocol, Host, {0,0,0,0}, Port} + end, + [node, protocol, host, ip_address, port]). + +internal_exchanges() -> + Tables = [rabbit_exchange, rabbit_durable_exchange], + AddInternalFun = + fun ({exchange, Name, Type, Durable, AutoDelete, Args}) -> + {exchange, Name, Type, Durable, AutoDelete, false, Args} + end, + [ ok = mnesia(T, + AddInternalFun, + [name, type, durable, auto_delete, internal, arguments]) + || T <- Tables ], + ok. + +user_to_internal_user() -> + mnesia( + rabbit_user, + fun({user, Username, PasswordHash, IsAdmin}) -> + {internal_user, Username, PasswordHash, IsAdmin} + end, + [username, password_hash, is_admin], internal_user). + +%%-------------------------------------------------------------------- + +mnesia(TableName, Fun, FieldList) -> + {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList), + ok. + +mnesia(TableName, Fun, FieldList, NewRecordName) -> + {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, + NewRecordName), + ok. diff -Nru rabbitmq-server-1.7.2/src/rabbit_variable_queue.erl rabbitmq-server-2.3.1/src/rabbit_variable_queue.erl --- rabbitmq-server-1.7.2/src/rabbit_variable_queue.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_variable_queue.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,1803 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. 
+%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_variable_queue). + +-export([init/3, terminate/1, delete_and_terminate/1, + purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, + tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, + requeue/3, len/1, is_empty/1, dropwhile/2, + set_ram_duration_target/2, ram_duration/1, + needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, + status/1]). + +-export([start/1, stop/0]). + +%% exported for testing only +-export([start_msg_store/2, stop_msg_store/0, init/5]). + +%%---------------------------------------------------------------------------- +%% Definitions: + +%% alpha: this is a message where both the message itself, and its +%% position within the queue are held in RAM +%% +%% beta: this is a message where the message itself is only held on +%% disk, but its position within the queue is held in RAM. +%% +%% gamma: this is a message where the message itself is only held on +%% disk, but its position is both in RAM and on disk. +%% +%% delta: this is a collection of messages, represented by a single +%% term, where the messages and their position are only held on +%% disk. +%% +%% Note that for persistent messages, the message and its position +%% within the queue are always held on disk, *in addition* to being in +%% one of the above classifications. +%% +%% Also note that within this code, the term gamma never +%% appears. Instead, gammas are defined by betas who have had their +%% queue position recorded on disk. +%% +%% In general, messages move q1 -> q2 -> delta -> q3 -> q4, though +%% many of these steps are frequently skipped. q1 and q4 only hold +%% alphas, q2 and q3 hold both betas and gammas (as queues of queues, +%% using the bpqueue module where the block prefix determines whether +%% they're betas or gammas). When a message arrives, its +%% classification is determined. It is then added to the rightmost +%% appropriate queue. +%% +%% If a new message is determined to be a beta or gamma, q1 is +%% empty. If a new message is determined to be a delta, q1 and q2 are +%% empty (and actually q4 too). +%% +%% When removing messages from a queue, if q4 is empty then q3 is read +%% directly. If q3 becomes empty then the next segment's worth of +%% messages from delta are read into q3, reducing the size of +%% delta. If the queue is non empty, either q4 or q3 contain +%% entries. It is never permitted for delta to hold all the messages +%% in the queue. +%% +%% The duration indicated to us by the memory_monitor is used to +%% calculate, given our current ingress and egress rates, how many +%% messages we should hold in RAM. We track the ingress and egress +%% rates for both messages and pending acks and rates for both are +%% considered when calculating the number of messages to hold in +%% RAM. When we need to push alphas to betas or betas to gammas, we +%% favour writing out messages that are further from the head of the +%% queue. This minimises writes to disk, as the messages closer to the +%% tail of the queue stay in the queue for longer, thus do not need to +%% be replaced as quickly by sending other messages to disk. +%% +%% Whilst messages are pushed to disk and forgotten from RAM as soon +%% as requested by a new setting of the queue RAM duration, the +%% inverse is not true: we only load messages back into RAM as +%% demanded as the queue is read from. Thus only publishes to the +%% queue will take up available spare capacity. 
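Read together with the #msg_status and #delta records defined further down, the classification above maps roughly onto record fields as follows (an inferred summary, not spelled out in the code):

    %% alpha: #msg_status with msg set (content in RAM); held in q1/q4
    %% beta:  #msg_status with msg = undefined, msg_on_disk = true,
    %%        index_on_disk = false; held in q2/q3 (counted by ram_index_count)
    %% gamma: as beta, but index_on_disk = true; held in q2/q3
    %% delta: no per-message #msg_status; an entire seq_id range summarised
    %%        by one #delta { start_seq_id, count, end_seq_id }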
+%% +%% When we report our duration to the memory monitor, we calculate +%% average ingress and egress rates over the last two samples, and +%% then calculate our duration based on the sum of the ingress and +%% egress rates. More than two samples could be used, but it's a +%% balance between responding quickly enough to changes in +%% producers/consumers versus ignoring temporary blips. The problem +%% with temporary blips is that with just a few queues, they can have +%% substantial impact on the calculation of the average duration and +%% hence cause unnecessary I/O. Another alternative is to increase the +%% amqqueue_process:RAM_DURATION_UPDATE_PERIOD to beyond 5 +%% seconds. However, that then runs the risk of being too slow to +%% inform the memory monitor of changes. Thus a 5 second interval, +%% plus a rolling average over the last two samples seems to work +%% well in practice. +%% +%% The sum of the ingress and egress rates is used because the egress +%% rate alone is not sufficient. Adding in the ingress rate means that +%% queues which are being flooded by messages are given more memory, +%% resulting in them being able to process the messages faster (by +%% doing less I/O, or at least deferring it) and thus helping keep +%% their mailboxes empty and thus the queue as a whole is more +%% responsive. If such a queue also has fast but previously idle +%% consumers, the consumer can then start to be driven as fast as it +%% can go, whereas if only egress rate was being used, the incoming +%% messages may have to be written to disk and then read back in, +%% resulting in the hard disk being a bottleneck in driving the +%% consumers. Generally, we want to give Rabbit every chance of +%% getting rid of messages as fast as possible and remaining +%% responsive, and using only the egress rate impacts that goal. +%% +%% If a queue is full of transient messages, then the transition from +%% betas to deltas will be potentially very expensive as millions of +%% entries must be written to disk by the queue_index module. This can +%% badly stall the queue. In order to avoid this, the proportion of +%% gammas / (betas+gammas) must not be lower than (betas+gammas) / +%% (alphas+betas+gammas). As the queue grows or available memory +%% shrinks, the latter ratio increases, requiring the conversion of +%% more gammas to betas in order to maintain the invariant. At the +%% point at which betas and gammas must be converted to deltas, there +%% should be very few betas remaining, thus the transition is fast (no +%% work needs to be done for the gamma -> delta transition). +%% +%% The conversion of betas to gammas is done in batches of exactly +%% ?IO_BATCH_SIZE. This value should not be too small, otherwise the +%% frequent operations on the queues of q2 and q3 will not be +%% effectively amortised (switching the direction of queue access +%% defeats amortisation), nor should it be too big, otherwise +%% converting a batch stalls the queue for too long. Therefore, it +%% must be just right. ram_index_count is used here and is the number +%% of betas. +%% +%% The conversion from alphas to betas is also chunked, but only to +%% ensure no more than ?IO_BATCH_SIZE alphas are converted to betas at +%% any one time. This further smooths the effects of changes to the +%% target_ram_count and ensures the queue remains responsive +%% even when there is a large amount of IO work to do. 
The
+%% idle_timeout callback is utilised to ensure that conversions are
+%% done as promptly as possible whilst ensuring the queue remains
+%% responsive.
+%%
+%% In the queue we keep track of both messages that are pending
+%% delivery and messages that are pending acks. This ensures that
+%% purging (deleting the former) and deletion (deleting the former and
+%% the latter) are both cheap and do not require any scanning through
+%% qi segments.
+%%
+%% Pending acks are recorded in memory either as the tuple {SeqId,
+%% Guid, MsgProps} (tuple-form) or as the message itself (message-
+%% form). Acks for persistent messages are always stored in the tuple-
+%% form. Acks for transient messages are also stored in tuple-form if
+%% the message has been sent to disk as part of the memory reduction
+%% process. For transient messages that haven't already been written
+%% to disk, acks are stored in message-form.
+%%
+%% During memory reduction, acks stored in message-form are converted
+%% to tuple-form, and the corresponding messages are pushed out to
+%% disk.
+%%
+%% The order in which alphas are pushed to betas and message-form acks
+%% are pushed to disk is determined dynamically. We always prefer to
+%% push messages for the source (alphas or acks) that is growing the
+%% fastest (with growth measured as avg. ingress - avg. egress). In
+%% each round of memory reduction a chunk of messages at most
+%% ?IO_BATCH_SIZE in size is allocated to be pushed to disk. The
+%% fastest growing source will be reduced by as much of this chunk as
+%% possible. If there is any remaining allocation in the chunk after
+%% the first source has been reduced to zero, the second source will
+%% be reduced by as much of the remaining chunk as possible.
+%%
+%% Notes on Clean Shutdown
+%% (This documents behaviour in variable_queue, queue_index and
+%% msg_store.)
+%%
+%% In order to try to achieve as fast a start-up as possible, if a
+%% clean shutdown occurs, we try to save out state to disk to reduce
+%% work on startup. In the msg_store this takes the form of the
+%% index_module's state, plus the file_summary ets table, and client
+%% refs. In the VQ, this takes the form of the count of persistent
+%% messages in the queue and references into the msg_stores. The
+%% queue_index adds to these terms the details of its segments and
+%% stores the terms in the queue directory.
+%%
+%% Two message stores are used. One is created for persistent messages
+%% to durable queues that must survive restarts, and the other is used
+%% for all other messages that just happen to need to be written to
+%% disk. On start up we can therefore nuke the transient message
+%% store, and be sure that the messages in the persistent store are
+%% all that we need.
+%%
+%% The references to the msg_stores are there so that the msg_store
+%% knows to only trust its saved state if all of the queues it was
+%% previously talking to come up cleanly. Likewise, the queues
+%% themselves (esp queue_index) skip work in init if all the queues
+%% and msg_store were shutdown cleanly. This gives both good speed
+%% improvements and also robustness so that if anything possibly went
+%% wrong in shutdown (or there was subsequent manual tampering), all
+%% messages and queues that can be recovered are recovered, safely.
+%%
+%% To delete transient messages lazily, the variable_queue, on
+%% startup, stores the next_seq_id reported by the queue_index as the
+%% transient_threshold.
From that point on, whenever it's reading a +%% message off disk via the queue_index, if the seq_id is below this +%% threshold and the message is transient then it drops the message +%% (the message itself won't exist on disk because it would have been +%% stored in the transient msg_store which would have had its saved +%% state nuked on startup). This avoids the expensive operation of +%% scanning the entire queue on startup in order to delete transient +%% messages that were only pushed to disk to save memory. +%% +%%---------------------------------------------------------------------------- + +-behaviour(rabbit_backing_queue). + +-record(vqstate, + { q1, + q2, + delta, + q3, + q4, + next_seq_id, + pending_ack, + pending_ack_index, + ram_ack_index, + index_state, + msg_store_clients, + on_sync, + durable, + transient_threshold, + + len, + persistent_count, + + target_ram_count, + ram_msg_count, + ram_msg_count_prev, + ram_ack_count_prev, + ram_index_count, + out_counter, + in_counter, + rates, + msgs_on_disk, + msg_indices_on_disk, + unconfirmed, + ack_out_counter, + ack_in_counter, + ack_rates + }). + +-record(rates, { egress, ingress, avg_egress, avg_ingress, timestamp }). + +-record(msg_status, + { seq_id, + guid, + msg, + is_persistent, + is_delivered, + msg_on_disk, + index_on_disk, + msg_props + }). + +-record(delta, + { start_seq_id, %% start_seq_id is inclusive + count, + end_seq_id %% end_seq_id is exclusive + }). + +-record(tx, { pending_messages, pending_acks }). + +-record(sync, { acks_persistent, acks_all, pubs, funs }). + +%% When we discover, on publish, that we should write some indices to +%% disk for some betas, the IO_BATCH_SIZE sets the number of betas +%% that we must be due to write indices for before we do any work at +%% all. This is both a minimum and a maximum - we don't write fewer +%% than IO_BATCH_SIZE indices out in one go, and we don't write more - +%% we can always come back on the next publish to do more. +-define(IO_BATCH_SIZE, 64). +-define(PERSISTENT_MSG_STORE, msg_store_persistent). +-define(TRANSIENT_MSG_STORE, msg_store_transient). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). +-type(seq_id() :: non_neg_integer()). +-type(ack() :: seq_id()). + +-type(rates() :: #rates { egress :: {timestamp(), non_neg_integer()}, + ingress :: {timestamp(), non_neg_integer()}, + avg_egress :: float(), + avg_ingress :: float(), + timestamp :: timestamp() }). + +-type(delta() :: #delta { start_seq_id :: non_neg_integer(), + count :: non_neg_integer(), + end_seq_id :: non_neg_integer() }). + +-type(sync() :: #sync { acks_persistent :: [[seq_id()]], + acks_all :: [[seq_id()]], + pubs :: [{message_properties_transformer(), + [rabbit_types:basic_message()]}], + funs :: [fun (() -> any())] }). 
+ +-type(state() :: #vqstate { + q1 :: queue(), + q2 :: bpqueue:bpqueue(), + delta :: delta(), + q3 :: bpqueue:bpqueue(), + q4 :: queue(), + next_seq_id :: seq_id(), + pending_ack :: dict(), + ram_ack_index :: gb_tree(), + index_state :: any(), + msg_store_clients :: 'undefined' | {{any(), binary()}, + {any(), binary()}}, + on_sync :: sync(), + durable :: boolean(), + + len :: non_neg_integer(), + persistent_count :: non_neg_integer(), + + transient_threshold :: non_neg_integer(), + target_ram_count :: non_neg_integer() | 'infinity', + ram_msg_count :: non_neg_integer(), + ram_msg_count_prev :: non_neg_integer(), + ram_index_count :: non_neg_integer(), + out_counter :: non_neg_integer(), + in_counter :: non_neg_integer(), + rates :: rates(), + msgs_on_disk :: gb_set(), + msg_indices_on_disk :: gb_set(), + unconfirmed :: gb_set(), + ack_out_counter :: non_neg_integer(), + ack_in_counter :: non_neg_integer(), + ack_rates :: rates() }). + +-include("rabbit_backing_queue_spec.hrl"). + +-endif. + +-define(BLANK_DELTA, #delta { start_seq_id = undefined, + count = 0, + end_seq_id = undefined }). +-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z, + count = 0, + end_seq_id = Z }). + +-define(BLANK_SYNC, #sync { acks_persistent = [], + acks_all = [], + pubs = [], + funs = [] }). + +%%---------------------------------------------------------------------------- +%% Public API +%%---------------------------------------------------------------------------- + +start(DurableQueues) -> + {AllTerms, StartFunState} = rabbit_queue_index:recover(DurableQueues), + start_msg_store( + [Ref || Terms <- AllTerms, + begin + Ref = proplists:get_value(persistent_ref, Terms), + Ref =/= undefined + end], + StartFunState). + +stop() -> stop_msg_store(). + +start_msg_store(Refs, StartFunState) -> + ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store, + [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(), + undefined, {fun (ok) -> finished end, ok}]), + ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store, + [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(), + Refs, StartFunState]). + +stop_msg_store() -> + ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), + ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). + +init(QueueName, IsDurable, Recover) -> + Self = self(), + init(QueueName, IsDurable, Recover, + fun (Guids, ActionTaken) -> + msgs_written_to_disk(Self, Guids, ActionTaken) + end, + fun (Guids) -> msg_indices_written_to_disk(Self, Guids) end). 
+ +init(QueueName, IsDurable, false, MsgOnDiskFun, MsgIdxOnDiskFun) -> + IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), + init(IsDurable, IndexState, 0, [], + case IsDurable of + true -> msg_store_client_init(?PERSISTENT_MSG_STORE, + MsgOnDiskFun); + false -> undefined + end, + msg_store_client_init(?TRANSIENT_MSG_STORE, undefined)); + +init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> + Terms = rabbit_queue_index:shutdown_terms(QueueName), + {PRef, TRef, Terms1} = + case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of + [] -> {proplists:get_value(persistent_ref, Terms), + proplists:get_value(transient_ref, Terms), + Terms}; + _ -> {rabbit_guid:guid(), rabbit_guid:guid(), []} + end, + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef, + MsgOnDiskFun), + TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE, TRef, + undefined), + {DeltaCount, IndexState} = + rabbit_queue_index:recover( + QueueName, Terms1, + rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE), + fun (Guid) -> + rabbit_msg_store:contains(Guid, PersistentClient) + end, + MsgIdxOnDiskFun), + init(true, IndexState, DeltaCount, Terms1, + PersistentClient, TransientClient). + +terminate(State) -> + State1 = #vqstate { persistent_count = PCount, + index_state = IndexState, + msg_store_clients = {MSCStateP, MSCStateT} } = + remove_pending_ack(true, tx_commit_index(State)), + PRef = case MSCStateP of + undefined -> undefined; + _ -> ok = rabbit_msg_store:client_terminate(MSCStateP), + rabbit_msg_store:client_ref(MSCStateP) + end, + ok = rabbit_msg_store:client_terminate(MSCStateT), + TRef = rabbit_msg_store:client_ref(MSCStateT), + Terms = [{persistent_ref, PRef}, + {transient_ref, TRef}, + {persistent_count, PCount}], + a(State1 #vqstate { index_state = rabbit_queue_index:terminate( + Terms, IndexState), + msg_store_clients = undefined }). + +%% the only difference between purge and delete is that delete also +%% needs to delete everything that's been delivered and not ack'd. +delete_and_terminate(State) -> + %% TODO: there is no need to interact with qi at all - which we do + %% as part of 'purge' and 'remove_pending_ack', other than + %% deleting it. + {_PurgeCount, State1} = purge(State), + State2 = #vqstate { index_state = IndexState, + msg_store_clients = {MSCStateP, MSCStateT} } = + remove_pending_ack(false, State1), + IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState), + case MSCStateP of + undefined -> ok; + _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP) + end, + rabbit_msg_store:client_delete_and_terminate(MSCStateT), + a(State2 #vqstate { index_state = IndexState1, + msg_store_clients = undefined }). + +purge(State = #vqstate { q4 = Q4, + index_state = IndexState, + msg_store_clients = MSCState, + len = Len, + persistent_count = PCount }) -> + %% TODO: when there are no pending acks, which is a common case, + %% we could simply wipe the qi instead of issuing delivers and + %% acks for all the messages. 
+ {LensByStore, IndexState1} = remove_queue_entries( + fun rabbit_misc:queue_fold/3, Q4, + orddict:new(), IndexState, MSCState), + {LensByStore1, State1 = #vqstate { q1 = Q1, + index_state = IndexState2, + msg_store_clients = MSCState1 }} = + purge_betas_and_deltas(LensByStore, + State #vqstate { q4 = queue:new(), + index_state = IndexState1 }), + {LensByStore2, IndexState3} = remove_queue_entries( + fun rabbit_misc:queue_fold/3, Q1, + LensByStore1, IndexState2, MSCState1), + PCount1 = PCount - find_persistent_count(LensByStore2), + {Len, a(State1 #vqstate { q1 = queue:new(), + index_state = IndexState3, + len = 0, + ram_msg_count = 0, + ram_index_count = 0, + persistent_count = PCount1 })}. + +publish(Msg, MsgProps, State) -> + {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), + a(reduce_memory_use(State1)). + +publish_delivered(false, #basic_message { guid = Guid }, + _MsgProps, State = #vqstate { len = 0 }) -> + blind_confirm(self(), gb_sets:singleton(Guid)), + {undefined, a(State)}; +publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, + guid = Guid }, + MsgProps = #message_properties { + needs_confirming = NeedsConfirming }, + State = #vqstate { len = 0, + next_seq_id = SeqId, + out_counter = OutCount, + in_counter = InCount, + persistent_count = PCount, + durable = IsDurable, + unconfirmed = UC }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = (msg_status(IsPersistent1, SeqId, Msg, MsgProps)) + #msg_status { is_delivered = true }, + {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), + State2 = record_pending_ack(m(MsgStatus1), State1), + PCount1 = PCount + one_if(IsPersistent1), + UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + {SeqId, a(reduce_memory_use( + State2 #vqstate { next_seq_id = SeqId + 1, + out_counter = OutCount + 1, + in_counter = InCount + 1, + persistent_count = PCount1, + unconfirmed = UC1 }))}. + +dropwhile(Pred, State) -> + {_OkOrEmpty, State1} = dropwhile1(Pred, State), + State1. + +dropwhile1(Pred, State) -> + internal_queue_out( + fun(MsgStatus = #msg_status { msg_props = MsgProps }, State1) -> + case Pred(MsgProps) of + true -> + {_, State2} = internal_fetch(false, MsgStatus, State1), + dropwhile1(Pred, State2); + false -> + %% message needs to go back into Q4 (or maybe go + %% in for the first time if it was loaded from + %% Q3). Also the msg contents might not be in + %% RAM, so read them in now + {MsgStatus1, State2 = #vqstate { q4 = Q4 }} = + read_msg(MsgStatus, State1), + {ok, State2 #vqstate {q4 = queue:in_r(MsgStatus1, Q4) }} + end + end, State). + +fetch(AckRequired, State) -> + internal_queue_out( + fun(MsgStatus, State1) -> + %% it's possible that the message wasn't read from disk + %% at this point, so read it in. + {MsgStatus1, State2} = read_msg(MsgStatus, State1), + internal_fetch(AckRequired, MsgStatus1, State2) + end, State). + +internal_queue_out(Fun, State = #vqstate { q4 = Q4 }) -> + case queue:out(Q4) of + {empty, _Q4} -> + case fetch_from_q3(State) of + {empty, State1} = Result -> a(State1), Result; + {loaded, {MsgStatus, State1}} -> Fun(MsgStatus, State1) + end; + {{value, MsgStatus}, Q4a} -> + Fun(MsgStatus, State #vqstate { q4 = Q4a }) + end. 
+ +read_msg(MsgStatus = #msg_status { msg = undefined, + guid = Guid, + is_persistent = IsPersistent }, + State = #vqstate { ram_msg_count = RamMsgCount, + msg_store_clients = MSCState}) -> + {{ok, Msg = #basic_message {}}, MSCState1} = + msg_store_read(MSCState, IsPersistent, Guid), + {MsgStatus #msg_status { msg = Msg }, + State #vqstate { ram_msg_count = RamMsgCount + 1, + msg_store_clients = MSCState1 }}; +read_msg(MsgStatus, State) -> + {MsgStatus, State}. + +internal_fetch(AckRequired, MsgStatus = #msg_status { + seq_id = SeqId, + guid = Guid, + msg = Msg, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_on_disk = MsgOnDisk, + index_on_disk = IndexOnDisk }, + State = #vqstate {ram_msg_count = RamMsgCount, + out_counter = OutCount, + index_state = IndexState, + msg_store_clients = MSCState, + len = Len, + persistent_count = PCount }) -> + %% 1. Mark it delivered if necessary + IndexState1 = maybe_write_delivered( + IndexOnDisk andalso not IsDelivered, + SeqId, IndexState), + + %% 2. Remove from msg_store and queue index, if necessary + Rem = fun () -> + ok = msg_store_remove(MSCState, IsPersistent, [Guid]) + end, + Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, + IndexState2 = + case {AckRequired, MsgOnDisk, IndexOnDisk, IsPersistent} of + {false, true, false, _} -> Rem(), IndexState1; + {false, true, true, _} -> Rem(), Ack(); + { true, true, true, false} -> Ack(); + _ -> IndexState1 + end, + + %% 3. If an ack is required, add something sensible to PA + {AckTag, State1} = case AckRequired of + true -> StateN = record_pending_ack( + MsgStatus #msg_status { + is_delivered = true }, State), + {SeqId, StateN}; + false -> {undefined, State} + end, + + PCount1 = PCount - one_if(IsPersistent andalso not AckRequired), + Len1 = Len - 1, + RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined), + + {{Msg, IsDelivered, AckTag, Len1}, + a(State1 #vqstate { ram_msg_count = RamMsgCount1, + out_counter = OutCount + 1, + index_state = IndexState2, + len = Len1, + persistent_count = PCount1 })}. + +ack(AckTags, State) -> + a(ack(fun msg_store_remove/3, + fun (_, State0) -> State0 end, + AckTags, State)). + +tx_publish(Txn, Msg = #basic_message { is_persistent = IsPersistent }, MsgProps, + State = #vqstate { durable = IsDurable, + msg_store_clients = MSCState }) -> + Tx = #tx { pending_messages = Pubs } = lookup_tx(Txn), + store_tx(Txn, Tx #tx { pending_messages = [{Msg, MsgProps} | Pubs] }), + case IsPersistent andalso IsDurable of + true -> MsgStatus = msg_status(true, undefined, Msg, MsgProps), + #msg_status { msg_on_disk = true } = + maybe_write_msg_to_disk(false, MsgStatus, MSCState); + false -> ok + end, + a(State). + +tx_ack(Txn, AckTags, State) -> + Tx = #tx { pending_acks = Acks } = lookup_tx(Txn), + store_tx(Txn, Tx #tx { pending_acks = [AckTags | Acks] }), + State. + +tx_rollback(Txn, State = #vqstate { durable = IsDurable, + msg_store_clients = MSCState }) -> + #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), + erase_tx(Txn), + ok = case IsDurable of + true -> msg_store_remove(MSCState, true, persistent_guids(Pubs)); + false -> ok + end, + {lists:append(AckTags), a(State)}. 
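A compressed view of the transaction path through tx_publish/4, tx_ack/3 and tx_rollback/2 above and tx_commit/4, which follows (a sketch of the flow, not additional behaviour):

    %% tx_publish  - buffer {Msg, MsgProps} in the per-process #tx{};
    %%               persistent messages on durable queues are written to the
    %%               msg_store straight away
    %% tx_ack      - buffer the ack tags in the same #tx{}
    %% tx_rollback - erase the #tx{}; remove any persistent messages already
    %%               written to the msg_store
    %% tx_commit   - for durable queues with persistent publishes, sync the
    %%               msg_store first and complete via its callback; otherwise
    %%               go straight to tx_commit_post_msg_store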
+ +tx_commit(Txn, Fun, MsgPropsFun, + State = #vqstate { durable = IsDurable, + msg_store_clients = MSCState }) -> + #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), + erase_tx(Txn), + AckTags1 = lists:append(AckTags), + PersistentGuids = persistent_guids(Pubs), + HasPersistentPubs = PersistentGuids =/= [], + {AckTags1, + a(case IsDurable andalso HasPersistentPubs of + true -> ok = msg_store_sync( + MSCState, true, PersistentGuids, + msg_store_callback(PersistentGuids, Pubs, AckTags1, + Fun, MsgPropsFun)), + State; + false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, + Fun, MsgPropsFun, State) + end)}. + +requeue(AckTags, MsgPropsFun, State) -> + MsgPropsFun1 = fun (MsgProps) -> + (MsgPropsFun(MsgProps)) #message_properties { + needs_confirming = false } + end, + a(reduce_memory_use( + ack(fun msg_store_release/3, + fun (#msg_status { msg = Msg, msg_props = MsgProps }, State1) -> + {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), + true, false, State1), + State2; + ({IsPersistent, Guid, MsgProps}, State1) -> + #vqstate { msg_store_clients = MSCState } = State1, + {{ok, Msg = #basic_message{}}, MSCState1} = + msg_store_read(MSCState, IsPersistent, Guid), + State2 = State1 #vqstate { msg_store_clients = MSCState1 }, + {_SeqId, State3} = publish(Msg, MsgPropsFun1(MsgProps), + true, true, State2), + State3 + end, + AckTags, State))). + +len(#vqstate { len = Len }) -> Len. + +is_empty(State) -> 0 == len(State). + +set_ram_duration_target( + DurationTarget, State = #vqstate { + rates = #rates { avg_egress = AvgEgressRate, + avg_ingress = AvgIngressRate }, + ack_rates = #rates { avg_egress = AvgAckEgressRate, + avg_ingress = AvgAckIngressRate }, + target_ram_count = TargetRamCount }) -> + Rate = + AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate, + TargetRamCount1 = + case DurationTarget of + infinity -> infinity; + _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec + end, + State1 = State #vqstate { target_ram_count = TargetRamCount1 }, + a(case TargetRamCount1 == infinity orelse + (TargetRamCount =/= infinity andalso + TargetRamCount1 >= TargetRamCount) of + true -> State1; + false -> reduce_memory_use(State1) + end). 
+ +ram_duration(State = #vqstate { + rates = #rates { timestamp = Timestamp, + egress = Egress, + ingress = Ingress } = Rates, + ack_rates = #rates { timestamp = AckTimestamp, + egress = AckEgress, + ingress = AckIngress } = ARates, + in_counter = InCount, + out_counter = OutCount, + ack_in_counter = AckInCount, + ack_out_counter = AckOutCount, + ram_msg_count = RamMsgCount, + ram_msg_count_prev = RamMsgCountPrev, + ram_ack_index = RamAckIndex, + ram_ack_count_prev = RamAckCountPrev }) -> + Now = now(), + {AvgEgressRate, Egress1} = update_rate(Now, Timestamp, OutCount, Egress), + {AvgIngressRate, Ingress1} = update_rate(Now, Timestamp, InCount, Ingress), + + {AvgAckEgressRate, AckEgress1} = + update_rate(Now, AckTimestamp, AckOutCount, AckEgress), + {AvgAckIngressRate, AckIngress1} = + update_rate(Now, AckTimestamp, AckInCount, AckIngress), + + RamAckCount = gb_trees:size(RamAckIndex), + + Duration = %% msgs+acks / (msgs+acks/sec) == sec + case AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso + AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0 of + true -> infinity; + false -> (RamMsgCountPrev + RamMsgCount + + RamAckCount + RamAckCountPrev) / + (4 * (AvgEgressRate + AvgIngressRate + + AvgAckEgressRate + AvgAckIngressRate)) + end, + + {Duration, State #vqstate { + rates = Rates #rates { + egress = Egress1, + ingress = Ingress1, + avg_egress = AvgEgressRate, + avg_ingress = AvgIngressRate, + timestamp = Now }, + ack_rates = ARates #rates { + egress = AckEgress1, + ingress = AckIngress1, + avg_egress = AvgAckEgressRate, + avg_ingress = AvgAckIngressRate, + timestamp = Now }, + in_counter = 0, + out_counter = 0, + ack_in_counter = 0, + ack_out_counter = 0, + ram_msg_count_prev = RamMsgCount, + ram_ack_count_prev = RamAckCount }}. + +needs_idle_timeout(State = #vqstate { on_sync = OnSync }) -> + case {OnSync, needs_index_sync(State)} of + {?BLANK_SYNC, false} -> + {Res, _State} = reduce_memory_use( + fun (_Quota, State1) -> {0, State1} end, + fun (_Quota, State1) -> State1 end, + fun (State1) -> State1 end, + fun (_Quota, State1) -> {0, State1} end, + State), + Res; + _ -> + true + end. + +idle_timeout(State) -> + a(reduce_memory_use(confirm_commit_index(tx_commit_index(State)))). + +handle_pre_hibernate(State = #vqstate { index_state = IndexState }) -> + State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }. + +status(#vqstate { + q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4, + len = Len, + pending_ack = PA, + ram_ack_index = RAI, + on_sync = #sync { funs = From }, + target_ram_count = TargetRamCount, + ram_msg_count = RamMsgCount, + ram_index_count = RamIndexCount, + next_seq_id = NextSeqId, + persistent_count = PersistentCount, + rates = #rates { avg_egress = AvgEgressRate, + avg_ingress = AvgIngressRate }, + ack_rates = #rates { avg_egress = AvgAckEgressRate, + avg_ingress = AvgAckIngressRate } }) -> + [ {q1 , queue:len(Q1)}, + {q2 , bpqueue:len(Q2)}, + {delta , Delta}, + {q3 , bpqueue:len(Q3)}, + {q4 , queue:len(Q4)}, + {len , Len}, + {pending_acks , dict:size(PA)}, + {outstanding_txns , length(From)}, + {target_ram_count , TargetRamCount}, + {ram_msg_count , RamMsgCount}, + {ram_ack_count , gb_trees:size(RAI)}, + {ram_index_count , RamIndexCount}, + {next_seq_id , NextSeqId}, + {persistent_count , PersistentCount}, + {avg_ingress_rate , AvgIngressRate}, + {avg_egress_rate , AvgEgressRate}, + {avg_ack_ingress_rate, AvgAckIngressRate}, + {avg_ack_egress_rate , AvgAckEgressRate} ]. 
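To make the arithmetic in set_ram_duration_target/2 and ram_duration/1 concrete, a worked example with purely hypothetical figures:

    %% avg rates:  msg egress 150/s, msg ingress 100/s,
    %%             ack egress 50/s,  ack ingress 50/s      => sum = 350/s
    %% counts:     ram_msg_count 2000 (prev 1800),
    %%             ram_ack_count 100  (prev 100)           => sum = 4000
    %% ram_duration: 4000 / (4 * 350) ~= 2.86 seconds
    %% set_ram_duration_target(2, State) with the same rates permits
    %% trunc(2 * 350) = 700 entries in RAM, and calls reduce_memory_use/1
    %% if that is lower than the previous target.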
+ +%%---------------------------------------------------------------------------- +%% Minor helpers +%%---------------------------------------------------------------------------- + +a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4, + len = Len, + persistent_count = PersistentCount, + ram_msg_count = RamMsgCount, + ram_index_count = RamIndexCount }) -> + E1 = queue:is_empty(Q1), + E2 = bpqueue:is_empty(Q2), + ED = Delta#delta.count == 0, + E3 = bpqueue:is_empty(Q3), + E4 = queue:is_empty(Q4), + LZ = Len == 0, + + true = E1 or not E3, + true = E2 or not ED, + true = ED or not E3, + true = LZ == (E3 and E4), + + true = Len >= 0, + true = PersistentCount >= 0, + true = RamMsgCount >= 0, + true = RamIndexCount >= 0, + + State. + +m(MsgStatus = #msg_status { msg = Msg, + is_persistent = IsPersistent, + msg_on_disk = MsgOnDisk, + index_on_disk = IndexOnDisk }) -> + true = (not IsPersistent) or IndexOnDisk, + true = (not IndexOnDisk) or MsgOnDisk, + true = (Msg =/= undefined) or MsgOnDisk, + + MsgStatus. + +one_if(true ) -> 1; +one_if(false) -> 0. + +cons_if(true, E, L) -> [E | L]; +cons_if(false, _E, L) -> L. + +gb_sets_maybe_insert(false, _Val, Set) -> Set; +%% when requeueing, we re-add a guid to the unconfirmed set +gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). + +msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = Guid }, + MsgProps) -> + #msg_status { seq_id = SeqId, guid = Guid, msg = Msg, + is_persistent = IsPersistent, is_delivered = false, + msg_on_disk = false, index_on_disk = false, + msg_props = MsgProps }. + +with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) -> + {Result, MSCStateP1} = Fun(MSCStateP), + {Result, {MSCStateP1, MSCStateT}}; +with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) -> + {Result, MSCStateT1} = Fun(MSCStateT), + {Result, {MSCStateP, MSCStateT1}}. + +with_immutable_msg_store_state(MSCState, IsPersistent, Fun) -> + {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent, + fun (MSCState1) -> + {Fun(MSCState1), MSCState1} + end), + Res. + +msg_store_client_init(MsgStore, MsgOnDiskFun) -> + msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun). + +msg_store_client_init(MsgStore, Ref, MsgOnDiskFun) -> + rabbit_msg_store:client_init( + MsgStore, Ref, MsgOnDiskFun, + msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE)). + +msg_store_write(MSCState, IsPersistent, Guid, Msg) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> rabbit_msg_store:write(Guid, Msg, MSCState1) end). + +msg_store_read(MSCState, IsPersistent, Guid) -> + with_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> rabbit_msg_store:read(Guid, MSCState1) end). + +msg_store_remove(MSCState, IsPersistent, Guids) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MCSState1) -> rabbit_msg_store:remove(Guids, MCSState1) end). + +msg_store_release(MSCState, IsPersistent, Guids) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MCSState1) -> rabbit_msg_store:release(Guids, MCSState1) end). + +msg_store_sync(MSCState, IsPersistent, Guids, Callback) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> rabbit_msg_store:sync(Guids, Callback, MSCState1) end). + +msg_store_close_fds(MSCState, IsPersistent) -> + with_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). 
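The assertion helpers a/1 and m/1 at the top of this section encode the queue-shape rules from the Definitions comment; spelled out informally:

    %% a/1: q1 non-empty    => q3 non-empty
    %%      q2 non-empty    => delta non-empty
    %%      delta non-empty => q3 non-empty
    %%      len == 0       <=> q3 and q4 both empty
    %% m/1: is_persistent => index_on_disk; index_on_disk => msg_on_disk;
    %%      msg not in RAM => msg_on_disk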
+ +msg_store_close_fds_fun(IsPersistent) -> + Self = self(), + fun () -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + Self, + fun (State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = + msg_store_close_fds(MSCState, IsPersistent), + {[], State #vqstate { msg_store_clients = MSCState1 }} + end) + end. + +maybe_write_delivered(false, _SeqId, IndexState) -> + IndexState; +maybe_write_delivered(true, SeqId, IndexState) -> + rabbit_queue_index:deliver([SeqId], IndexState). + +lookup_tx(Txn) -> case get({txn, Txn}) of + undefined -> #tx { pending_messages = [], + pending_acks = [] }; + V -> V + end. + +store_tx(Txn, Tx) -> put({txn, Txn}, Tx). + +erase_tx(Txn) -> erase({txn, Txn}). + +persistent_guids(Pubs) -> + [Guid || {#basic_message { guid = Guid, + is_persistent = true }, _MsgProps} <- Pubs]. + +betas_from_index_entries(List, TransientThreshold, IndexState) -> + {Filtered, Delivers, Acks} = + lists:foldr( + fun ({Guid, SeqId, MsgProps, IsPersistent, IsDelivered}, + {Filtered1, Delivers1, Acks1}) -> + case SeqId < TransientThreshold andalso not IsPersistent of + true -> {Filtered1, + cons_if(not IsDelivered, SeqId, Delivers1), + [SeqId | Acks1]}; + false -> {[m(#msg_status { msg = undefined, + guid = Guid, + seq_id = SeqId, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_on_disk = true, + index_on_disk = true, + msg_props = MsgProps + }) | Filtered1], + Delivers1, + Acks1} + end + end, {[], [], []}, List), + {bpqueue:from_list([{true, Filtered}]), + rabbit_queue_index:ack(Acks, + rabbit_queue_index:deliver(Delivers, IndexState))}. + +%% the first arg is the older delta +combine_deltas(?BLANK_DELTA_PATTERN(X), ?BLANK_DELTA_PATTERN(Y)) -> + ?BLANK_DELTA; +combine_deltas(?BLANK_DELTA_PATTERN(X), #delta { start_seq_id = Start, + count = Count, + end_seq_id = End } = B) -> + true = Start + Count =< End, %% ASSERTION + B; +combine_deltas(#delta { start_seq_id = Start, + count = Count, + end_seq_id = End } = A, ?BLANK_DELTA_PATTERN(Y)) -> + true = Start + Count =< End, %% ASSERTION + A; +combine_deltas(#delta { start_seq_id = StartLow, + count = CountLow, + end_seq_id = EndLow }, + #delta { start_seq_id = StartHigh, + count = CountHigh, + end_seq_id = EndHigh }) -> + Count = CountLow + CountHigh, + true = (StartLow =< StartHigh) %% ASSERTIONS + andalso ((StartLow + CountLow) =< EndLow) + andalso ((StartHigh + CountHigh) =< EndHigh) + andalso ((StartLow + Count) =< EndHigh), + #delta { start_seq_id = StartLow, count = Count, end_seq_id = EndHigh }. + +beta_fold(Fun, Init, Q) -> + bpqueue:foldr(fun (_Prefix, Value, Acc) -> Fun(Value, Acc) end, Init, Q). + +update_rate(Now, Then, Count, {OThen, OCount}) -> + %% avg over the current period and the previous + {1000000.0 * (Count + OCount) / timer:now_diff(Now, OThen), {Then, Count}}. 
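update_rate/4 averages over the current and the previous sampling period; timer:now_diff/2 returns microseconds, so the 1000000.0 factor converts the result to events per second. For example, with hypothetical figures:

    %% previous period: 100 msgs; current period: 200 msgs;
    %% OThen was 2 seconds (2,000,000 us) before Now:
    %%   1000000.0 * (200 + 100) / 2000000 = 150.0 msgs/s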
+ +%%---------------------------------------------------------------------------- +%% Internal major helpers for Public API +%%---------------------------------------------------------------------------- + +init(IsDurable, IndexState, DeltaCount, Terms, + PersistentClient, TransientClient) -> + {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), + + DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), + Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of + true -> ?BLANK_DELTA; + false -> #delta { start_seq_id = LowSeqId, + count = DeltaCount1, + end_seq_id = NextSeqId } + end, + Now = now(), + State = #vqstate { + q1 = queue:new(), + q2 = bpqueue:new(), + delta = Delta, + q3 = bpqueue:new(), + q4 = queue:new(), + next_seq_id = NextSeqId, + pending_ack = dict:new(), + ram_ack_index = gb_trees:empty(), + index_state = IndexState1, + msg_store_clients = {PersistentClient, TransientClient}, + on_sync = ?BLANK_SYNC, + durable = IsDurable, + transient_threshold = NextSeqId, + + len = DeltaCount1, + persistent_count = DeltaCount1, + + target_ram_count = infinity, + ram_msg_count = 0, + ram_msg_count_prev = 0, + ram_ack_count_prev = 0, + ram_index_count = 0, + out_counter = 0, + in_counter = 0, + rates = blank_rate(Now, DeltaCount1), + msgs_on_disk = gb_sets:new(), + msg_indices_on_disk = gb_sets:new(), + unconfirmed = gb_sets:new(), + ack_out_counter = 0, + ack_in_counter = 0, + ack_rates = blank_rate(Now, 0) }, + a(maybe_deltas_to_betas(State)). + +blank_rate(Timestamp, IngressLength) -> + #rates { egress = {Timestamp, 0}, + ingress = {Timestamp, IngressLength}, + avg_egress = 0.0, + avg_ingress = 0.0, + timestamp = Timestamp }. + +msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> + Self = self(), + F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( + Self, fun (StateN) -> {[], tx_commit_post_msg_store( + true, Pubs, AckTags, + Fun, MsgPropsFun, StateN)} + end) + end, + fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( + fun () -> remove_persistent_messages( + PersistentGuids) + end, F) + end) + end. + +remove_persistent_messages(Guids) -> + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, undefined), + ok = rabbit_msg_store:remove(Guids, PersistentClient), + rabbit_msg_store:client_delete_and_terminate(PersistentClient). + +tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, + State = #vqstate { + on_sync = OnSync = #sync { + acks_persistent = SPAcks, + acks_all = SAcks, + pubs = SPubs, + funs = SFuns }, + pending_ack = PA, + durable = IsDurable }) -> + PersistentAcks = + case IsDurable of + true -> [AckTag || AckTag <- AckTags, + case dict:fetch(AckTag, PA) of + #msg_status {} -> + false; + {IsPersistent, _Guid, _MsgProps} -> + IsPersistent + end]; + false -> [] + end, + case IsDurable andalso (HasPersistentPubs orelse PersistentAcks =/= []) of + true -> State #vqstate { + on_sync = #sync { + acks_persistent = [PersistentAcks | SPAcks], + acks_all = [AckTags | SAcks], + pubs = [{MsgPropsFun, Pubs} | SPubs], + funs = [Fun | SFuns] }}; + false -> State1 = tx_commit_index( + State #vqstate { + on_sync = #sync { + acks_persistent = [], + acks_all = [AckTags], + pubs = [{MsgPropsFun, Pubs}], + funs = [Fun] } }), + State1 #vqstate { on_sync = OnSync } + end. 
+ +tx_commit_index(State = #vqstate { on_sync = ?BLANK_SYNC }) -> + State; +tx_commit_index(State = #vqstate { on_sync = #sync { + acks_persistent = SPAcks, + acks_all = SAcks, + pubs = SPubs, + funs = SFuns }, + durable = IsDurable }) -> + PAcks = lists:append(SPAcks), + Acks = lists:append(SAcks), + Pubs = [{Msg, Fun(MsgProps)} || {Fun, PubsN} <- lists:reverse(SPubs), + {Msg, MsgProps} <- lists:reverse(PubsN)], + {SeqIds, State1 = #vqstate { index_state = IndexState }} = + lists:foldl( + fun ({Msg = #basic_message { is_persistent = IsPersistent }, + MsgProps}, + {SeqIdsAcc, State2}) -> + IsPersistent1 = IsDurable andalso IsPersistent, + {SeqId, State3} = + publish(Msg, MsgProps, false, IsPersistent1, State2), + {cons_if(IsPersistent1, SeqId, SeqIdsAcc), State3} + end, {PAcks, ack(Acks, State)}, Pubs), + IndexState1 = rabbit_queue_index:sync(SeqIds, IndexState), + [ Fun() || Fun <- lists:reverse(SFuns) ], + reduce_memory_use( + State1 #vqstate { index_state = IndexState1, on_sync = ?BLANK_SYNC }). + +purge_betas_and_deltas(LensByStore, + State = #vqstate { q3 = Q3, + index_state = IndexState, + msg_store_clients = MSCState }) -> + case bpqueue:is_empty(Q3) of + true -> {LensByStore, State}; + false -> {LensByStore1, IndexState1} = + remove_queue_entries(fun beta_fold/3, Q3, + LensByStore, IndexState, MSCState), + purge_betas_and_deltas(LensByStore1, + maybe_deltas_to_betas( + State #vqstate { + q3 = bpqueue:new(), + index_state = IndexState1 })) + end. + +remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) -> + {GuidsByStore, Delivers, Acks} = + Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q), + ok = orddict:fold(fun (IsPersistent, Guids, ok) -> + msg_store_remove(MSCState, IsPersistent, Guids) + end, ok, GuidsByStore), + {sum_guids_by_store_to_len(LensByStore, GuidsByStore), + rabbit_queue_index:ack(Acks, + rabbit_queue_index:deliver(Delivers, IndexState))}. + +remove_queue_entries1( + #msg_status { guid = Guid, seq_id = SeqId, + is_delivered = IsDelivered, msg_on_disk = MsgOnDisk, + index_on_disk = IndexOnDisk, is_persistent = IsPersistent }, + {GuidsByStore, Delivers, Acks}) -> + {case MsgOnDisk of + true -> rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore); + false -> GuidsByStore + end, + cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), + cons_if(IndexOnDisk, SeqId, Acks)}. + +sum_guids_by_store_to_len(LensByStore, GuidsByStore) -> + orddict:fold( + fun (IsPersistent, Guids, LensByStore1) -> + orddict:update_counter(IsPersistent, length(Guids), LensByStore1) + end, LensByStore, GuidsByStore). 
+ +%%---------------------------------------------------------------------------- +%% Internal gubbins for publishing +%%---------------------------------------------------------------------------- + +publish(Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, + MsgProps = #message_properties { needs_confirming = NeedsConfirming }, + IsDelivered, MsgOnDisk, + State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, + next_seq_id = SeqId, + len = Len, + in_counter = InCount, + persistent_count = PCount, + durable = IsDurable, + ram_msg_count = RamMsgCount, + unconfirmed = UC }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = (msg_status(IsPersistent1, SeqId, Msg, MsgProps)) + #msg_status { is_delivered = IsDelivered, msg_on_disk = MsgOnDisk}, + {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), + State2 = case bpqueue:is_empty(Q3) of + false -> State1 #vqstate { q1 = queue:in(m(MsgStatus1), Q1) }; + true -> State1 #vqstate { q4 = queue:in(m(MsgStatus1), Q4) } + end, + PCount1 = PCount + one_if(IsPersistent1), + UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + {SeqId, State2 #vqstate { next_seq_id = SeqId + 1, + len = Len + 1, + in_counter = InCount + 1, + persistent_count = PCount1, + ram_msg_count = RamMsgCount + 1, + unconfirmed = UC1 }}. + +maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status { + msg_on_disk = true }, _MSCState) -> + MsgStatus; +maybe_write_msg_to_disk(Force, MsgStatus = #msg_status { + msg = Msg, guid = Guid, + is_persistent = IsPersistent }, MSCState) + when Force orelse IsPersistent -> + Msg1 = Msg #basic_message { + %% don't persist any recoverable decoded properties + content = rabbit_binary_parser:clear_decoded_content( + Msg #basic_message.content)}, + ok = msg_store_write(MSCState, IsPersistent, Guid, Msg1), + MsgStatus #msg_status { msg_on_disk = true }; +maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) -> + MsgStatus. + +maybe_write_index_to_disk(_Force, MsgStatus = #msg_status { + index_on_disk = true }, IndexState) -> + true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION + {MsgStatus, IndexState}; +maybe_write_index_to_disk(Force, MsgStatus = #msg_status { + guid = Guid, + seq_id = SeqId, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_props = MsgProps}, IndexState) + when Force orelse IsPersistent -> + true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION + IndexState1 = rabbit_queue_index:publish( + Guid, SeqId, MsgProps, IsPersistent, IndexState), + {MsgStatus #msg_status { index_on_disk = true }, + maybe_write_delivered(IsDelivered, SeqId, IndexState1)}; +maybe_write_index_to_disk(_Force, MsgStatus, IndexState) -> + {MsgStatus, IndexState}. + +maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, + State = #vqstate { index_state = IndexState, + msg_store_clients = MSCState }) -> + MsgStatus1 = maybe_write_msg_to_disk(ForceMsg, MsgStatus, MSCState), + {MsgStatus2, IndexState1} = + maybe_write_index_to_disk(ForceIndex, MsgStatus1, IndexState), + {MsgStatus2, State #vqstate { index_state = IndexState1 }}. 
+ +%%---------------------------------------------------------------------------- +%% Internal gubbins for acks +%%---------------------------------------------------------------------------- + +record_pending_ack(#msg_status { seq_id = SeqId, + guid = Guid, + is_persistent = IsPersistent, + msg_on_disk = MsgOnDisk, + msg_props = MsgProps } = MsgStatus, + State = #vqstate { pending_ack = PA, + ram_ack_index = RAI, + ack_in_counter = AckInCount}) -> + {AckEntry, RAI1} = + case MsgOnDisk of + true -> {{IsPersistent, Guid, MsgProps}, RAI}; + false -> {MsgStatus, gb_trees:insert(SeqId, Guid, RAI)} + end, + PA1 = dict:store(SeqId, AckEntry, PA), + State #vqstate { pending_ack = PA1, + ram_ack_index = RAI1, + ack_in_counter = AckInCount + 1}. + +remove_pending_ack(KeepPersistent, + State = #vqstate { pending_ack = PA, + index_state = IndexState, + msg_store_clients = MSCState }) -> + {PersistentSeqIds, GuidsByStore} = + dict:fold(fun accumulate_ack/3, accumulate_ack_init(), PA), + State1 = State #vqstate { pending_ack = dict:new(), + ram_ack_index = gb_trees:empty() }, + case KeepPersistent of + true -> case orddict:find(false, GuidsByStore) of + error -> State1; + {ok, Guids} -> ok = msg_store_remove(MSCState, false, + Guids), + State1 + end; + false -> IndexState1 = + rabbit_queue_index:ack(PersistentSeqIds, IndexState), + [ok = msg_store_remove(MSCState, IsPersistent, Guids) + || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], + State1 #vqstate { index_state = IndexState1 } + end. + +ack(_MsgStoreFun, _Fun, [], State) -> + State; +ack(MsgStoreFun, Fun, AckTags, State) -> + {{PersistentSeqIds, GuidsByStore}, + State1 = #vqstate { index_state = IndexState, + msg_store_clients = MSCState, + persistent_count = PCount, + ack_out_counter = AckOutCount }} = + lists:foldl( + fun (SeqId, {Acc, State2 = #vqstate { pending_ack = PA, + ram_ack_index = RAI }}) -> + AckEntry = dict:fetch(SeqId, PA), + {accumulate_ack(SeqId, AckEntry, Acc), + Fun(AckEntry, State2 #vqstate { + pending_ack = dict:erase(SeqId, PA), + ram_ack_index = + gb_trees:delete_any(SeqId, RAI)})} + end, {accumulate_ack_init(), State}, AckTags), + IndexState1 = rabbit_queue_index:ack(PersistentSeqIds, IndexState), + [ok = MsgStoreFun(MSCState, IsPersistent, Guids) + || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], + PCount1 = PCount - find_persistent_count(sum_guids_by_store_to_len( + orddict:new(), GuidsByStore)), + State1 #vqstate { index_state = IndexState1, + persistent_count = PCount1, + ack_out_counter = AckOutCount + length(AckTags) }. + +accumulate_ack_init() -> {[], orddict:new()}. + +accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS + msg_on_disk = false, + index_on_disk = false }, + {PersistentSeqIdsAcc, GuidsByStore}) -> + {PersistentSeqIdsAcc, GuidsByStore}; +accumulate_ack(SeqId, {IsPersistent, Guid, _MsgProps}, + {PersistentSeqIdsAcc, GuidsByStore}) -> + {cons_if(IsPersistent, SeqId, PersistentSeqIdsAcc), + rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore)}. + +find_persistent_count(LensByStore) -> + case orddict:find(true, LensByStore) of + error -> 0; + {ok, Len} -> Len + end. 
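To make the accumulator's shape concrete, here is a hedged example of acking a single persistent message whose payload is already in the message store (seq id 42; some_guid and some_props are placeholder atoms; cons_if/3 and rabbit_misc:orddict_cons/3 are defined elsewhere in the code base):

%% the seq id is collected for the queue index ack, and the guid is grouped
%% under the persistent-store key for a later msg_store remove
{[42], [{true, [some_guid]}]} =
    accumulate_ack(42, {true, some_guid, some_props}, accumulate_ack_init()).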
+ +%%---------------------------------------------------------------------------- +%% Internal plumbing for confirms (aka publisher acks) +%%---------------------------------------------------------------------------- + +confirm_commit_index(State = #vqstate { index_state = IndexState }) -> + case needs_index_sync(State) of + true -> State #vqstate { + index_state = rabbit_queue_index:sync(IndexState) }; + false -> State + end. + +remove_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + State #vqstate { msgs_on_disk = gb_sets:difference(MOD, GuidSet), + msg_indices_on_disk = gb_sets:difference(MIOD, GuidSet), + unconfirmed = gb_sets:difference(UC, GuidSet) }. + +needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + %% If UC is empty then by definition, MIOD and MOD are also empty + %% and there's nothing that can be pending a sync. + + %% If UC is not empty, then we want to find is_empty(UC - MIOD), + %% but the subtraction can be expensive. Thus instead, we test to + %% see if UC is a subset of MIOD. This can only be the case if + %% MIOD == UC, which would indicate that every message in UC is + %% also in MIOD and is thus _all_ pending on a msg_store sync, not + %% on a qi sync. Thus the negation of this is sufficient. Because + %% is_subset is short circuiting, this is more efficient than the + %% subtraction. + not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). + +msgs_confirmed(GuidSet, State) -> + {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. + +blind_confirm(QPid, GuidSet) -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + QPid, fun (State) -> msgs_confirmed(GuidSet, State) end). + +msgs_written_to_disk(QPid, GuidSet, removed) -> + blind_confirm(QPid, GuidSet); +msgs_written_to_disk(QPid, GuidSet, written) -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + QPid, fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:intersection( + gb_sets:union(MOD, GuidSet), UC) }) + end). + +msg_indices_written_to_disk(QPid, GuidSet) -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + QPid, fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:intersection( + gb_sets:union(MIOD, GuidSet), UC) }) + end). + +%%---------------------------------------------------------------------------- +%% Phase changes +%%---------------------------------------------------------------------------- + +%% Determine whether a reduction in memory use is necessary, and call +%% functions to perform the required phase changes. The function can +%% also be used to just do the former, by passing in dummy phase +%% change functions. +%% +%% The function does not report on any needed beta->delta conversions, +%% though the conversion function for that is called as necessary. The +%% reason is twofold. Firstly, this is safe because the conversion is +%% only ever necessary just after a transition to a +%% target_ram_count of zero or after an incremental alpha->beta +%% conversion. In the former case the conversion is performed straight +%% away (i.e. 
any betas present at the time are converted to deltas), +%% and in the latter case the need for a conversion is flagged up +%% anyway. Secondly, this is necessary because we do not have a +%% precise and cheap predicate for determining whether a beta->delta +%% conversion is necessary - due to the complexities of retaining up +%% one segment's worth of messages in q3 - and thus would risk +%% perpetually reporting the need for a conversion when no such +%% conversion is needed. That in turn could cause an infinite loop. +reduce_memory_use(_AlphaBetaFun, _BetaGammaFun, _BetaDeltaFun, _AckFun, + State = #vqstate {target_ram_count = infinity}) -> + {false, State}; +reduce_memory_use(AlphaBetaFun, BetaGammaFun, BetaDeltaFun, AckFun, + State = #vqstate { + ram_ack_index = RamAckIndex, + ram_msg_count = RamMsgCount, + target_ram_count = TargetRamCount, + rates = #rates { avg_ingress = AvgIngress, + avg_egress = AvgEgress }, + ack_rates = #rates { avg_ingress = AvgAckIngress, + avg_egress = AvgAckEgress } + }) -> + + {Reduce, State1} = + case chunk_size(RamMsgCount + gb_trees:size(RamAckIndex), + TargetRamCount) of + 0 -> {false, State}; + %% Reduce memory of pending acks and alphas. The order is + %% determined based on which is growing faster. Whichever + %% comes second may very well get a quota of 0 if the + %% first manages to push out the max number of messages. + S1 -> {_, State2} = + lists:foldl(fun (ReduceFun, {QuotaN, StateN}) -> + ReduceFun(QuotaN, StateN) + end, + {S1, State}, + case (AvgAckIngress - AvgAckEgress) > + (AvgIngress - AvgEgress) of + true -> [AckFun, AlphaBetaFun]; + false -> [AlphaBetaFun, AckFun] + end), + {true, State2} + end, + + case State1 #vqstate.target_ram_count of + 0 -> {Reduce, BetaDeltaFun(State1)}; + _ -> case chunk_size(State1 #vqstate.ram_index_count, + permitted_ram_index_count(State1)) of + ?IO_BATCH_SIZE = S2 -> {true, BetaGammaFun(S2, State1)}; + _ -> {Reduce, State1} + end + end. + +limit_ram_acks(0, State) -> + {0, State}; +limit_ram_acks(Quota, State = #vqstate { pending_ack = PA, + ram_ack_index = RAI }) -> + case gb_trees:is_empty(RAI) of + true -> + {Quota, State}; + false -> + {SeqId, Guid, RAI1} = gb_trees:take_largest(RAI), + MsgStatus = #msg_status { + guid = Guid, %% ASSERTION + is_persistent = false, %% ASSERTION + msg_props = MsgProps } = dict:fetch(SeqId, PA), + {_, State1} = maybe_write_to_disk(true, false, MsgStatus, State), + limit_ram_acks(Quota - 1, + State1 #vqstate { + pending_ack = + dict:store(SeqId, {false, Guid, MsgProps}, PA), + ram_ack_index = RAI1 }) + end. + + +reduce_memory_use(State) -> + {_, State1} = reduce_memory_use(fun push_alphas_to_betas/2, + fun limit_ram_index/2, + fun push_betas_to_deltas/1, + fun limit_ram_acks/2, + State), + State1. + +limit_ram_index(Quota, State = #vqstate { q2 = Q2, q3 = Q3, + index_state = IndexState, + ram_index_count = RamIndexCount }) -> + {Q2a, {Quota1, IndexState1}} = limit_ram_index( + fun bpqueue:map_fold_filter_r/4, + Q2, {Quota, IndexState}), + %% TODO: we shouldn't be writing index entries for messages that + %% can never end up in delta due them residing in the only segment + %% held by q3. + {Q3a, {Quota2, IndexState2}} = limit_ram_index( + fun bpqueue:map_fold_filter_r/4, + Q3, {Quota1, IndexState1}), + State #vqstate { q2 = Q2a, q3 = Q3a, + index_state = IndexState2, + ram_index_count = RamIndexCount - (Quota - Quota2) }. 
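The subset test in needs_index_sync/1 (in the confirms section above) is equivalent to the "expensive" difference-based formulation whenever the stated invariant holds, i.e. whenever msg_indices_on_disk is a subset of unconfirmed. A small check with throwaway gb_sets values, purely illustrative:

UC   = gb_sets:from_list([a, b, c]),   %% unconfirmed
MIOD = gb_sets:from_list([a, b]),      %% msg indices already on disk
%% message c is unconfirmed and its index is not yet on disk, so a queue
%% index sync is needed; both formulations agree:
true = not gb_sets:is_empty(gb_sets:subtract(UC, MIOD)),
true = not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)).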
+ +limit_ram_index(_MapFoldFilterFun, Q, {0, IndexState}) -> + {Q, {0, IndexState}}; +limit_ram_index(MapFoldFilterFun, Q, {Quota, IndexState}) -> + MapFoldFilterFun( + fun erlang:'not'/1, + fun (MsgStatus, {0, _IndexStateN}) -> + false = MsgStatus #msg_status.index_on_disk, %% ASSERTION + stop; + (MsgStatus, {N, IndexStateN}) when N > 0 -> + false = MsgStatus #msg_status.index_on_disk, %% ASSERTION + {MsgStatus1, IndexStateN1} = + maybe_write_index_to_disk(true, MsgStatus, IndexStateN), + {true, m(MsgStatus1), {N-1, IndexStateN1}} + end, {Quota, IndexState}, Q). + +permitted_ram_index_count(#vqstate { len = 0 }) -> + infinity; +permitted_ram_index_count(#vqstate { len = Len, + q2 = Q2, + q3 = Q3, + delta = #delta { count = DeltaCount } }) -> + BetaLen = bpqueue:len(Q2) + bpqueue:len(Q3), + BetaLen - trunc(BetaLen * BetaLen / (Len - DeltaCount)). + +chunk_size(Current, Permitted) + when Permitted =:= infinity orelse Permitted >= Current -> + 0; +chunk_size(Current, Permitted) -> + lists:min([Current - Permitted, ?IO_BATCH_SIZE]). + +fetch_from_q3(State = #vqstate { + q1 = Q1, + q2 = Q2, + delta = #delta { count = DeltaCount }, + q3 = Q3, + q4 = Q4, + ram_index_count = RamIndexCount}) -> + case bpqueue:out(Q3) of + {empty, _Q3} -> + {empty, State}; + {{value, IndexOnDisk, MsgStatus}, Q3a} -> + RamIndexCount1 = RamIndexCount - one_if(not IndexOnDisk), + true = RamIndexCount1 >= 0, %% ASSERTION + State1 = State #vqstate { q3 = Q3a, + ram_index_count = RamIndexCount1 }, + State2 = + case {bpqueue:is_empty(Q3a), 0 == DeltaCount} of + {true, true} -> + %% q3 is now empty, it wasn't before; delta is + %% still empty. So q2 must be empty, and we + %% know q4 is empty otherwise we wouldn't be + %% loading from q3. As such, we can just set + %% q4 to Q1. + true = bpqueue:is_empty(Q2), %% ASSERTION + true = queue:is_empty(Q4), %% ASSERTION + State1 #vqstate { q1 = queue:new(), + q4 = Q1 }; + {true, false} -> + maybe_deltas_to_betas(State1); + {false, _} -> + %% q3 still isn't empty, we've not touched + %% delta, so the invariants between q1, q2, + %% delta and q3 are maintained + State1 + end, + {loaded, {MsgStatus, State2}} + end. + +maybe_deltas_to_betas(State = #vqstate { delta = ?BLANK_DELTA_PATTERN(X) }) -> + State; +maybe_deltas_to_betas(State = #vqstate { + q2 = Q2, + delta = Delta, + q3 = Q3, + index_state = IndexState, + transient_threshold = TransientThreshold }) -> + #delta { start_seq_id = DeltaSeqId, + count = DeltaCount, + end_seq_id = DeltaSeqIdEnd } = Delta, + DeltaSeqId1 = + lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId), + DeltaSeqIdEnd]), + {List, IndexState1} = + rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1, IndexState), + {Q3a, IndexState2} = + betas_from_index_entries(List, TransientThreshold, IndexState1), + State1 = State #vqstate { index_state = IndexState2 }, + case bpqueue:len(Q3a) of + 0 -> + %% we ignored every message in the segment due to it being + %% transient and below the threshold + maybe_deltas_to_betas( + State1 #vqstate { + delta = Delta #delta { start_seq_id = DeltaSeqId1 }}); + Q3aLen -> + Q3b = bpqueue:join(Q3, Q3a), + case DeltaCount - Q3aLen of + 0 -> + %% delta is now empty, but it wasn't before, so + %% can now join q2 onto q3 + State1 #vqstate { q2 = bpqueue:new(), + delta = ?BLANK_DELTA, + q3 = bpqueue:join(Q3b, Q2) }; + N when N > 0 -> + Delta1 = #delta { start_seq_id = DeltaSeqId1, + count = N, + end_seq_id = DeltaSeqIdEnd }, + State1 #vqstate { delta = Delta1, + q3 = Q3b } + end + end. 
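As a sanity check on the arithmetic in permitted_ram_index_count/1 above (illustrative numbers only, evaluated in an Erlang shell):

%% 1000 messages in the queue, none in delta, 200 betas across q2 and q3:
160 = 200 - trunc(200 * 200 / (1000 - 0)).
%% as the betas come to dominate the queue the allowance collapses to zero;
%% once ram_index_count exceeds it by a full ?IO_BATCH_SIZE, reduce_memory_use
%% calls limit_ram_index/2 to push the excess index entries to disk:
0 = 1000 - trunc(1000 * 1000 / (1000 - 0)).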
+ +push_alphas_to_betas(Quota, State) -> + {Quota1, State1} = maybe_push_q1_to_betas(Quota, State), + {Quota2, State2} = maybe_push_q4_to_betas(Quota1, State1), + {Quota2, State2}. + +maybe_push_q1_to_betas(Quota, State = #vqstate { q1 = Q1 }) -> + maybe_push_alphas_to_betas( + fun queue:out/1, + fun (MsgStatus = #msg_status { index_on_disk = IndexOnDisk }, + Q1a, State1 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) -> + State1 #vqstate { q1 = Q1a, + q3 = bpqueue:in(IndexOnDisk, MsgStatus, Q3) }; + (MsgStatus = #msg_status { index_on_disk = IndexOnDisk }, + Q1a, State1 = #vqstate { q2 = Q2 }) -> + State1 #vqstate { q1 = Q1a, + q2 = bpqueue:in(IndexOnDisk, MsgStatus, Q2) } + end, Quota, Q1, State). + +maybe_push_q4_to_betas(Quota, State = #vqstate { q4 = Q4 }) -> + maybe_push_alphas_to_betas( + fun queue:out_r/1, + fun (MsgStatus = #msg_status { index_on_disk = IndexOnDisk }, + Q4a, State1 = #vqstate { q3 = Q3 }) -> + State1 #vqstate { q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + q4 = Q4a } + end, Quota, Q4, State). + +maybe_push_alphas_to_betas(_Generator, _Consumer, Quota, _Q, + State = #vqstate { + ram_msg_count = RamMsgCount, + target_ram_count = TargetRamCount }) + when Quota =:= 0 orelse + TargetRamCount =:= infinity orelse + TargetRamCount >= RamMsgCount -> + {Quota, State}; +maybe_push_alphas_to_betas(Generator, Consumer, Quota, Q, State) -> + case Generator(Q) of + {empty, _Q} -> + {Quota, State}; + {{value, MsgStatus}, Qa} -> + {MsgStatus1 = #msg_status { msg_on_disk = true, + index_on_disk = IndexOnDisk }, + State1 = #vqstate { ram_msg_count = RamMsgCount, + ram_index_count = RamIndexCount }} = + maybe_write_to_disk(true, false, MsgStatus, State), + MsgStatus2 = m(MsgStatus1 #msg_status { msg = undefined }), + RamIndexCount1 = RamIndexCount + one_if(not IndexOnDisk), + State2 = State1 #vqstate { ram_msg_count = RamMsgCount - 1, + ram_index_count = RamIndexCount1 }, + maybe_push_alphas_to_betas(Generator, Consumer, Quota - 1, Qa, + Consumer(MsgStatus2, Qa, State2)) + end. + +push_betas_to_deltas(State = #vqstate { q2 = Q2, + delta = Delta, + q3 = Q3, + index_state = IndexState, + ram_index_count = RamIndexCount }) -> + {Delta2, Q2a, RamIndexCount2, IndexState2} = + push_betas_to_deltas(fun (Q2MinSeqId) -> Q2MinSeqId end, + fun bpqueue:out/1, Q2, + RamIndexCount, IndexState), + {Delta3, Q3a, RamIndexCount3, IndexState3} = + push_betas_to_deltas(fun rabbit_queue_index:next_segment_boundary/1, + fun bpqueue:out_r/1, Q3, + RamIndexCount2, IndexState2), + Delta4 = combine_deltas(Delta3, combine_deltas(Delta, Delta2)), + State #vqstate { q2 = Q2a, + delta = Delta4, + q3 = Q3a, + index_state = IndexState3, + ram_index_count = RamIndexCount3 }. + +push_betas_to_deltas(LimitFun, Generator, Q, RamIndexCount, IndexState) -> + case bpqueue:out(Q) of + {empty, _Q} -> + {?BLANK_DELTA, Q, RamIndexCount, IndexState}; + {{value, _IndexOnDisk1, #msg_status { seq_id = MinSeqId }}, _Qa} -> + {{value, _IndexOnDisk2, #msg_status { seq_id = MaxSeqId }}, _Qb} = + bpqueue:out_r(Q), + Limit = LimitFun(MinSeqId), + case MaxSeqId < Limit of + true -> {?BLANK_DELTA, Q, RamIndexCount, IndexState}; + false -> {Len, Qc, RamIndexCount1, IndexState1} = + push_betas_to_deltas(Generator, Limit, Q, 0, + RamIndexCount, IndexState), + {#delta { start_seq_id = Limit, + count = Len, + end_seq_id = MaxSeqId + 1 }, + Qc, RamIndexCount1, IndexState1} + end + end. 
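The two limit functions passed to push_betas_to_deltas/5 above encode the asymmetry described earlier: q2 may be pushed out to delta wholesale, while q3 always keeps everything below the next queue-index segment boundary in RAM. The real boundary calculation lives in rabbit_queue_index; the sketch below merely restates it under the assumption of a fixed segment size (16384 is used purely for illustration):

%% hypothetical restatement of the q3 limit, not the actual implementation
next_segment_boundary_sketch(SeqId, SegmentSize) ->
    ((SeqId div SegmentSize) + 1) * SegmentSize.

%% e.g. next_segment_boundary_sketch(100, 16384) =:= 16384, so betas with
%% seq ids 100..16383 stay in q3 and only those from 16384 upwards are
%% eligible to move out to delta.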
+ +push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> + case Generator(Q) of + {empty, _Q} -> + {Count, Q, RamIndexCount, IndexState}; + {{value, _IndexOnDisk, #msg_status { seq_id = SeqId }}, _Qa} + when SeqId < Limit -> + {Count, Q, RamIndexCount, IndexState}; + {{value, IndexOnDisk, MsgStatus}, Qa} -> + {RamIndexCount1, IndexState1} = + case IndexOnDisk of + true -> {RamIndexCount, IndexState}; + false -> {#msg_status { index_on_disk = true }, + IndexState2} = + maybe_write_index_to_disk(true, MsgStatus, + IndexState), + {RamIndexCount - 1, IndexState2} + end, + push_betas_to_deltas( + Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_vhost.erl rabbitmq-server-2.3.1/src/rabbit_vhost.erl --- rabbitmq-server-1.7.2/src/rabbit_vhost.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_vhost.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,106 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_vhost). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-export([add/1, delete/1, exists/1, list/0, with/2]). + +-ifdef(use_specs). + +-spec(add/1 :: (rabbit_types:vhost()) -> 'ok'). +-spec(delete/1 :: (rabbit_types:vhost()) -> 'ok'). +-spec(exists/1 :: (rabbit_types:vhost()) -> boolean()). +-spec(list/0 :: () -> [rabbit_types:vhost()]). +-spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A). + +-endif. + +%%---------------------------------------------------------------------------- + +add(VHostPath) -> + R = rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:wread({rabbit_vhost, VHostPath}) of + [] -> ok = mnesia:write(rabbit_vhost, + #vhost{virtual_host = VHostPath}, + write); + [_] -> mnesia:abort({vhost_already_exists, VHostPath}) + end + end, + fun (ok, true) -> + ok; + (ok, false) -> + [rabbit_exchange:declare( + rabbit_misc:r(VHostPath, exchange, Name), + Type, true, false, false, []) || + {Name,Type} <- + [{<<"">>, direct}, + {<<"amq.direct">>, direct}, + {<<"amq.topic">>, topic}, + {<<"amq.match">>, headers}, %% per 0-9-1 pdf + {<<"amq.headers">>, headers}, %% per 0-9-1 xml + {<<"amq.fanout">>, fanout}]], + ok + end), + rabbit_log:info("Added vhost ~p~n", [VHostPath]), + R. + +delete(VHostPath) -> + %% FIXME: We are forced to delete the queues and exchanges outside + %% the TX below. Queue deletion involves sending messages to the queue + %% process, which in turn results in further mnesia actions and + %% eventually the termination of that process. 
Exchange deletion causes + %% notifications which must be sent outside the TX + [{ok,_} = rabbit_amqqueue:delete(Q, false, false) || + Q <- rabbit_amqqueue:list(VHostPath)], + [ok = rabbit_exchange:delete(Name, false) || + #exchange{name = Name} <- rabbit_exchange:list(VHostPath)], + R = rabbit_misc:execute_mnesia_transaction( + with(VHostPath, fun () -> + ok = internal_delete(VHostPath) + end)), + rabbit_log:info("Deleted vhost ~p~n", [VHostPath]), + R. + +internal_delete(VHostPath) -> + lists:foreach( + fun ({Username, _, _, _}) -> + ok = rabbit_auth_backend_internal:clear_permissions(Username, + VHostPath) + end, + rabbit_auth_backend_internal:list_vhost_permissions(VHostPath)), + ok = mnesia:delete({rabbit_vhost, VHostPath}), + ok. + +exists(VHostPath) -> + mnesia:dirty_read({rabbit_vhost, VHostPath}) /= []. + +list() -> + mnesia:dirty_all_keys(rabbit_vhost). + +with(VHostPath, Thunk) -> + fun () -> + case mnesia:read({rabbit_vhost, VHostPath}) of + [] -> + mnesia:abort({no_such_vhost, VHostPath}); + [_V] -> + Thunk() + end + end. diff -Nru rabbitmq-server-1.7.2/src/rabbit_writer.erl rabbitmq-server-2.3.1/src/rabbit_writer.erl --- rabbitmq-server-1.7.2/src/rabbit_writer.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/rabbit_writer.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,46 +1,30 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% All Rights Reserved. +%% The Original Code is RabbitMQ. %% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_writer). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([start/3, start_link/3, shutdown/1, mainloop/1]). 
--export([send_command/2, send_command/3, send_command_and_signal_back/3, - send_command_and_signal_back/4, send_command_and_notify/5]). --export([internal_send_command/3, internal_send_command/5]). +-export([start/5, start_link/5, mainloop/2, mainloop1/2]). +-export([send_command/2, send_command/3, + send_command_sync/2, send_command_sync/3, + send_command_and_notify/4, send_command_and_notify/5]). +-export([internal_send_command/4, internal_send_command/6]). --import(gen_tcp). - --record(wstate, {sock, channel, frame_max}). +-record(wstate, {sock, channel, frame_max, protocol}). -define(HIBERNATE_AFTER, 5000). @@ -48,80 +32,104 @@ -ifdef(use_specs). --spec(start/3 :: (socket(), channel_number(), non_neg_integer()) -> pid()). --spec(start_link/3 :: (socket(), channel_number(), non_neg_integer()) -> pid()). --spec(send_command/2 :: (pid(), amqp_method()) -> 'ok'). --spec(send_command/3 :: (pid(), amqp_method(), content()) -> 'ok'). --spec(send_command_and_signal_back/3 :: (pid(), amqp_method(), pid()) -> 'ok'). --spec(send_command_and_signal_back/4 :: - (pid(), amqp_method(), content(), pid()) -> 'ok'). +-spec(start/5 :: + (rabbit_net:socket(), rabbit_channel:channel_number(), + non_neg_integer(), rabbit_types:protocol(), pid()) + -> rabbit_types:ok(pid())). +-spec(start_link/5 :: + (rabbit_net:socket(), rabbit_channel:channel_number(), + non_neg_integer(), rabbit_types:protocol(), pid()) + -> rabbit_types:ok(pid())). +-spec(send_command/2 :: + (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). +-spec(send_command/3 :: + (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) + -> 'ok'). +-spec(send_command_sync/2 :: + (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). +-spec(send_command_sync/3 :: + (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) + -> 'ok'). +-spec(send_command_and_notify/4 :: + (pid(), pid(), pid(), rabbit_framing:amqp_method_record()) + -> 'ok'). -spec(send_command_and_notify/5 :: - (pid(), pid(), pid(), amqp_method(), content()) -> 'ok'). --spec(internal_send_command/3 :: - (socket(), channel_number(), amqp_method()) -> 'ok'). --spec(internal_send_command/5 :: - (socket(), channel_number(), amqp_method(), - content(), non_neg_integer()) -> 'ok'). + (pid(), pid(), pid(), rabbit_framing:amqp_method_record(), + rabbit_types:content()) + -> 'ok'). +-spec(internal_send_command/4 :: + (rabbit_net:socket(), rabbit_channel:channel_number(), + rabbit_framing:amqp_method_record(), rabbit_types:protocol()) + -> 'ok'). +-spec(internal_send_command/6 :: + (rabbit_net:socket(), rabbit_channel:channel_number(), + rabbit_framing:amqp_method_record(), rabbit_types:content(), + non_neg_integer(), rabbit_types:protocol()) + -> 'ok'). -endif. %%---------------------------------------------------------------------------- -start(Sock, Channel, FrameMax) -> - spawn(?MODULE, mainloop, [#wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax}]). - -start_link(Sock, Channel, FrameMax) -> - spawn_link(?MODULE, mainloop, [#wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax}]). +start(Sock, Channel, FrameMax, Protocol, ReaderPid) -> + {ok, + proc_lib:spawn(?MODULE, mainloop, [ReaderPid, + #wstate{sock = Sock, + channel = Channel, + frame_max = FrameMax, + protocol = Protocol}])}. + +start_link(Sock, Channel, FrameMax, Protocol, ReaderPid) -> + {ok, + proc_lib:spawn_link(?MODULE, mainloop, [ReaderPid, + #wstate{sock = Sock, + channel = Channel, + frame_max = FrameMax, + protocol = Protocol}])}. 
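For orientation, a hedged sketch of a call site for the new start_link/5 (Sock, Channel, FrameMax and ReaderPid are assumed to come from connection setup elsewhere, and rabbit_framing_amqp_0_9_1 is assumed to be the generated 0-9-1 framing module; none of this is taken verbatim from the diff):

%% the writer now carries the protocol module used for framing, plus the
%% reader pid that is informed if sending ever fails (see mainloop/2 below)
start_writer(Sock, Channel, FrameMax, ReaderPid) ->
    {ok, WriterPid} = rabbit_writer:start_link(Sock, Channel, FrameMax,
                                               rabbit_framing_amqp_0_9_1,
                                               ReaderPid),
    WriterPid.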
+ +mainloop(ReaderPid, State) -> + try + mainloop1(ReaderPid, State) + catch + exit:Error -> ReaderPid ! {channel_exit, #wstate.channel, Error} + end, + done. -mainloop(State) -> +mainloop1(ReaderPid, State) -> receive - Message -> ?MODULE:mainloop(handle_message(Message, State)) + Message -> ?MODULE:mainloop1(ReaderPid, handle_message(Message, State)) after ?HIBERNATE_AFTER -> - erlang:hibernate(?MODULE, mainloop, [State]) + erlang:hibernate(?MODULE, mainloop, [ReaderPid, State]) end. -handle_message({send_command, MethodRecord}, - State = #wstate{sock = Sock, channel = Channel}) -> - ok = internal_send_command_async(Sock, Channel, MethodRecord), - State; -handle_message({send_command, MethodRecord, Content}, - State = #wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax}) -> - ok = internal_send_command_async(Sock, Channel, MethodRecord, - Content, FrameMax), - State; -handle_message({send_command_and_signal_back, MethodRecord, Parent}, - State = #wstate{sock = Sock, channel = Channel}) -> - ok = internal_send_command_async(Sock, Channel, MethodRecord), - Parent ! rabbit_writer_send_command_signal, - State; -handle_message({send_command_and_signal_back, MethodRecord, Content, Parent}, - State = #wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax}) -> - ok = internal_send_command_async(Sock, Channel, MethodRecord, - Content, FrameMax), - Parent ! rabbit_writer_send_command_signal, +handle_message({send_command, MethodRecord}, State) -> + ok = internal_send_command_async(MethodRecord, State), + State; +handle_message({send_command, MethodRecord, Content}, State) -> + ok = internal_send_command_async(MethodRecord, Content, State), + State; +handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) -> + ok = internal_send_command_async(MethodRecord, State), + gen_server:reply(From, ok), + State; +handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}}, + State) -> + ok = internal_send_command_async(MethodRecord, Content, State), + gen_server:reply(From, ok), + State; +handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) -> + ok = internal_send_command_async(MethodRecord, State), + rabbit_amqqueue:notify_sent(QPid, ChPid), State; handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content}, - State = #wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax}) -> - ok = internal_send_command_async(Sock, Channel, MethodRecord, - Content, FrameMax), + State) -> + ok = internal_send_command_async(MethodRecord, Content, State), rabbit_amqqueue:notify_sent(QPid, ChPid), State; handle_message({inet_reply, _, ok}, State) -> State; handle_message({inet_reply, _, Status}, _State) -> exit({writer, send_failed, Status}); -handle_message(shutdown, _State) -> - exit(normal); handle_message(Message, _State) -> exit({writer, message_not_understood, Message}). @@ -135,48 +143,71 @@ W ! {send_command, MethodRecord, Content}, ok. -send_command_and_signal_back(W, MethodRecord, Parent) -> - W ! {send_command_and_signal_back, MethodRecord, Parent}, - ok. +send_command_sync(W, MethodRecord) -> + call(W, {send_command_sync, MethodRecord}). -send_command_and_signal_back(W, MethodRecord, Content, Parent) -> - W ! {send_command_and_signal_back, MethodRecord, Content, Parent}, +send_command_sync(W, MethodRecord, Content) -> + call(W, {send_command_sync, MethodRecord, Content}). + +send_command_and_notify(W, Q, ChPid, MethodRecord) -> + W ! {send_command_and_notify, Q, ChPid, MethodRecord}, ok. 
send_command_and_notify(W, Q, ChPid, MethodRecord, Content) -> W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content}, ok. -shutdown(W) -> - W ! shutdown, - ok. +%--------------------------------------------------------------------------- + +call(Pid, Msg) -> + {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity), + Res. %--------------------------------------------------------------------------- -assemble_frames(Channel, MethodRecord) -> +assemble_frame(Channel, MethodRecord, Protocol) -> ?LOGMESSAGE(out, Channel, MethodRecord, none), - rabbit_binary_generator:build_simple_method_frame(Channel, MethodRecord). + rabbit_binary_generator:build_simple_method_frame( + Channel, MethodRecord, Protocol). -assemble_frames(Channel, MethodRecord, Content, FrameMax) -> +assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) -> ?LOGMESSAGE(out, Channel, MethodRecord, Content), MethodName = rabbit_misc:method_record_type(MethodRecord), - true = rabbit_framing:method_has_content(MethodName), % assertion + true = Protocol:method_has_content(MethodName), % assertion MethodFrame = rabbit_binary_generator:build_simple_method_frame( - Channel, MethodRecord), + Channel, MethodRecord, Protocol), ContentFrames = rabbit_binary_generator:build_simple_content_frames( - Channel, Content, FrameMax), + Channel, Content, FrameMax, Protocol), [MethodFrame | ContentFrames]. +%% We optimise delivery of small messages. Content-bearing methods +%% require at least three frames. Small messages always fit into +%% that. We hand their frames to the Erlang network functions in one +%% go, which may lead to somewhat more efficient processing in the +%% runtime and a greater chance of coalescing into fewer TCP packets. +%% +%% By contrast, for larger messages, split across many frames, we want +%% to allow interleaving of frames on different channels. Hence we +%% hand them to the Erlang network functions one frame at a time. +send_frames(Fun, Sock, Frames) when length(Frames) =< 3 -> + Fun(Sock, Frames); +send_frames(Fun, Sock, Frames) -> + lists:foldl(fun (Frame, ok) -> Fun(Sock, Frame); + (_Frame, Other) -> Other + end, ok, Frames). + tcp_send(Sock, Data) -> rabbit_misc:throw_on_error(inet_error, fun () -> rabbit_net:send(Sock, Data) end). -internal_send_command(Sock, Channel, MethodRecord) -> - ok = tcp_send(Sock, assemble_frames(Channel, MethodRecord)). +internal_send_command(Sock, Channel, MethodRecord, Protocol) -> + ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)). -internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax) -> - ok = tcp_send(Sock, assemble_frames(Channel, MethodRecord, - Content, FrameMax)). +internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax, + Protocol) -> + ok = send_frames(fun tcp_send/2, Sock, + assemble_frames(Channel, MethodRecord, + Content, FrameMax, Protocol)). %% gen_tcp:send/2 does a selective receive of {inet_reply, Sock, %% Status} to obtain the result. That is bad when it is called from @@ -196,16 +227,23 @@ %% Also note that the port has bounded buffers and port_command blocks %% when these are full. So the fact that we process the result %% asynchronously does not impact flow control. -internal_send_command_async(Sock, Channel, MethodRecord) -> - true = port_cmd(Sock, assemble_frames(Channel, MethodRecord)), - ok. - -internal_send_command_async(Sock, Channel, MethodRecord, Content, FrameMax) -> - true = port_cmd(Sock, assemble_frames(Channel, MethodRecord, - Content, FrameMax)), - ok. 
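The "at most three frames" cut-off in send_frames/3 above corresponds to a body that fits into a single body frame, since a content-bearing method always contributes one method frame plus one content-header frame. A hedged restatement of that arithmetic (the 8-byte per-frame overhead and the 131072-byte frame max are assumptions made for illustration, not values taken from this diff):

%% frames needed for a content-bearing method with a BodySize-byte body
frames_for_body(BodySize, FrameMax) when FrameMax > 8 ->
    PayloadMax = FrameMax - 8,   %% assumed per-frame framing overhead
    2 + ((BodySize + PayloadMax - 1) div PayloadMax).

%% frames_for_body(1000, 131072)   =:= 3 -> handed to the socket in one call
%% frames_for_body(500000, 131072) =:= 6 -> written frame by frame, so frames
%%                                          from other channels can interleave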
+internal_send_command_async(MethodRecord, + #wstate{sock = Sock, + channel = Channel, + protocol = Protocol}) -> + ok = port_cmd(Sock, assemble_frame(Channel, MethodRecord, Protocol)). + +internal_send_command_async(MethodRecord, Content, + #wstate{sock = Sock, + channel = Channel, + frame_max = FrameMax, + protocol = Protocol}) -> + ok = send_frames(fun port_cmd/2, Sock, + assemble_frames(Channel, MethodRecord, + Content, FrameMax, Protocol)). port_cmd(Sock, Data) -> - try rabbit_net:port_command(Sock, Data) - catch error:Error -> exit({writer, send_failed, Error}) - end. + true = try rabbit_net:port_command(Sock, Data) + catch error:Error -> exit({writer, send_failed, Error}) + end, + ok. diff -Nru rabbitmq-server-1.7.2/src/supervisor2.erl rabbitmq-server-2.3.1/src/supervisor2.erl --- rabbitmq-server-1.7.2/src/supervisor2.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/supervisor2.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,1015 @@ +%% This file is a copy of supervisor.erl from the R13B-3 Erlang/OTP +%% distribution, with the following modifications: +%% +%% 1) the module name is supervisor2 +%% +%% 2) there is a new strategy called +%% simple_one_for_one_terminate. This is exactly the same as for +%% simple_one_for_one, except that children *are* explicitly +%% terminated as per the shutdown component of the child_spec. +%% +%% 3) child specifications can contain, as the restart type, a tuple +%% {permanent, Delay} | {transient, Delay} where Delay >= 0. The +%% delay, in seconds, indicates what should happen if a child, upon +%% being restarted, exceeds the MaxT and MaxR parameters. Thus, if +%% a child exits, it is restarted as normal. If it exits +%% sufficiently quickly and often to exceed the boundaries set by +%% the MaxT and MaxR parameters, and a Delay is specified, then +%% rather than stopping the supervisor, the supervisor instead +%% continues and tries to start up the child again, Delay seconds +%% later. +%% +%% Note that you can never restart more frequently than the MaxT +%% and MaxR parameters allow: i.e. you must wait until *both* the +%% Delay has passed *and* the MaxT and MaxR parameters allow the +%% child to be restarted. +%% +%% Also note that the Delay is a *minimum*. There is no guarantee +%% that the child will be restarted within that time, especially if +%% other processes are dying and being restarted at the same time - +%% essentially we have to wait for the delay to have passed and for +%% the MaxT and MaxR parameters to permit the child to be +%% restarted. This may require waiting for longer than Delay. +%% +%% 4) Added an 'intrinsic' restart type. Like the transient type, this +%% type means the child should only be restarted if the child exits +%% abnormally. Unlike the transient type, if the child exits +%% normally, the supervisor itself also exits normally. If the +%% child is a supervisor and it exits normally (i.e. with reason of +%% 'shutdown') then the child's parent also exits normally. +%% +%% All modifications are (C) 2010-2011 VMware, Inc. +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 1996-2009. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. 
+%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% +-module(supervisor2). + +-behaviour(gen_server). + +%% External exports +-export([start_link/2,start_link/3, + start_child/2, restart_child/2, + delete_child/2, terminate_child/2, + which_children/1, find_child/2, + check_childspecs/1]). + +-export([behaviour_info/1]). + +%% Internal exports +-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3]). +-export([handle_cast/2]). +-export([delayed_restart/2]). + +-define(DICT, dict). + +-record(state, {name, + strategy, + children = [], + dynamics = ?DICT:new(), + intensity, + period, + restarts = [], + module, + args}). + +-record(child, {pid = undefined, % pid is undefined when child is not running + name, + mfa, + restart_type, + shutdown, + child_type, + modules = []}). + +-define(is_simple(State), State#state.strategy =:= simple_one_for_one orelse + State#state.strategy =:= simple_one_for_one_terminate). +-define(is_terminate_simple(State), + State#state.strategy =:= simple_one_for_one_terminate). + +behaviour_info(callbacks) -> + [{init,1}]; +behaviour_info(_Other) -> + undefined. + +%%% --------------------------------------------------- +%%% This is a general process supervisor built upon gen_server.erl. +%%% Servers/processes should/could also be built using gen_server.erl. +%%% SupName = {local, atom()} | {global, atom()}. +%%% --------------------------------------------------- +start_link(Mod, Args) -> + gen_server:start_link(?MODULE, {self, Mod, Args}, []). + +start_link(SupName, Mod, Args) -> + gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []). + +%%% --------------------------------------------------- +%%% Interface functions. +%%% --------------------------------------------------- +start_child(Supervisor, ChildSpec) -> + call(Supervisor, {start_child, ChildSpec}). + +restart_child(Supervisor, Name) -> + call(Supervisor, {restart_child, Name}). + +delete_child(Supervisor, Name) -> + call(Supervisor, {delete_child, Name}). + +%%----------------------------------------------------------------- +%% Func: terminate_child/2 +%% Returns: ok | {error, Reason} +%% Note that the child is *always* terminated in some +%% way (maybe killed). +%%----------------------------------------------------------------- +terminate_child(Supervisor, Name) -> + call(Supervisor, {terminate_child, Name}). + +which_children(Supervisor) -> + call(Supervisor, which_children). + +find_child(Supervisor, Name) -> + [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor), + Name1 =:= Name]. + +call(Supervisor, Req) -> + gen_server:call(Supervisor, Req, infinity). + +check_childspecs(ChildSpecs) when is_list(ChildSpecs) -> + case check_startspec(ChildSpecs) of + {ok, _} -> ok; + Error -> {error, Error} + end; +check_childspecs(X) -> {error, {badarg, X}}. + +delayed_restart(Supervisor, RestartDetails) -> + gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). + +%%% --------------------------------------------------- +%%% +%%% Initialize the supervisor. 
+%%% +%%% --------------------------------------------------- +init({SupName, Mod, Args}) -> + process_flag(trap_exit, true), + case Mod:init(Args) of + {ok, {SupFlags, StartSpec}} -> + case init_state(SupName, SupFlags, Mod, Args) of + {ok, State} when ?is_simple(State) -> + init_dynamic(State, StartSpec); + {ok, State} -> + init_children(State, StartSpec); + Error -> + {stop, {supervisor_data, Error}} + end; + ignore -> + ignore; + Error -> + {stop, {bad_return, {Mod, init, Error}}} + end. + +init_children(State, StartSpec) -> + SupName = State#state.name, + case check_startspec(StartSpec) of + {ok, Children} -> + case start_children(Children, SupName) of + {ok, NChildren} -> + {ok, State#state{children = NChildren}}; + {error, NChildren} -> + terminate_children(NChildren, SupName), + {stop, shutdown} + end; + Error -> + {stop, {start_spec, Error}} + end. + +init_dynamic(State, [StartSpec]) -> + case check_startspec([StartSpec]) of + {ok, Children} -> + {ok, State#state{children = Children}}; + Error -> + {stop, {start_spec, Error}} + end; +init_dynamic(_State, StartSpec) -> + {stop, {bad_start_spec, StartSpec}}. + +%%----------------------------------------------------------------- +%% Func: start_children/2 +%% Args: Children = [#child] in start order +%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} +%% Purpose: Start all children. The new list contains #child's +%% with pids. +%% Returns: {ok, NChildren} | {error, NChildren} +%% NChildren = [#child] in termination order (reversed +%% start order) +%%----------------------------------------------------------------- +start_children(Children, SupName) -> start_children(Children, [], SupName). + +start_children([Child|Chs], NChildren, SupName) -> + case do_start_child(SupName, Child) of + {ok, Pid} -> + start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName); + {ok, Pid, _Extra} -> + start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName); + {error, Reason} -> + report_error(start_error, Reason, Child, SupName), + {error, lists:reverse(Chs) ++ [Child | NChildren]} + end; +start_children([], NChildren, _SupName) -> + {ok, NChildren}. + +do_start_child(SupName, Child) -> + #child{mfa = {M, F, A}} = Child, + case catch apply(M, F, A) of + {ok, Pid} when is_pid(Pid) -> + NChild = Child#child{pid = Pid}, + report_progress(NChild, SupName), + {ok, Pid}; + {ok, Pid, Extra} when is_pid(Pid) -> + NChild = Child#child{pid = Pid}, + report_progress(NChild, SupName), + {ok, Pid, Extra}; + ignore -> + {ok, undefined}; + {error, What} -> {error, What}; + What -> {error, What} + end. + +do_start_child_i(M, F, A) -> + case catch apply(M, F, A) of + {ok, Pid} when is_pid(Pid) -> + {ok, Pid}; + {ok, Pid, Extra} when is_pid(Pid) -> + {ok, Pid, Extra}; + ignore -> + {ok, undefined}; + {error, Error} -> + {error, Error}; + What -> + {error, What} + end. + + +%%% --------------------------------------------------- +%%% +%%% Callback functions. 
+%%% +%%% --------------------------------------------------- +handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) -> + #child{mfa = {M, F, A}} = hd(State#state.children), + Args = A ++ EArgs, + case do_start_child_i(M, F, Args) of + {ok, Pid} -> + NState = State#state{dynamics = + ?DICT:store(Pid, Args, State#state.dynamics)}, + {reply, {ok, Pid}, NState}; + {ok, Pid, Extra} -> + NState = State#state{dynamics = + ?DICT:store(Pid, Args, State#state.dynamics)}, + {reply, {ok, Pid, Extra}, NState}; + What -> + {reply, What, State} + end; + +%%% The requests terminate_child, delete_child and restart_child are +%%% invalid for simple_one_for_one and simple_one_for_one_terminate +%%% supervisors. +handle_call({_Req, _Data}, _From, State) when ?is_simple(State) -> + {reply, {error, State#state.strategy}, State}; + +handle_call({start_child, ChildSpec}, _From, State) -> + case check_childspec(ChildSpec) of + {ok, Child} -> + {Resp, NState} = handle_start_child(Child, State), + {reply, Resp, NState}; + What -> + {reply, {error, What}, State} + end; + +handle_call({restart_child, Name}, _From, State) -> + case get_child(Name, State) of + {value, Child} when Child#child.pid =:= undefined -> + case do_start_child(State#state.name, Child) of + {ok, Pid} -> + NState = replace_child(Child#child{pid = Pid}, State), + {reply, {ok, Pid}, NState}; + {ok, Pid, Extra} -> + NState = replace_child(Child#child{pid = Pid}, State), + {reply, {ok, Pid, Extra}, NState}; + Error -> + {reply, Error, State} + end; + {value, _} -> + {reply, {error, running}, State}; + _ -> + {reply, {error, not_found}, State} + end; + +handle_call({delete_child, Name}, _From, State) -> + case get_child(Name, State) of + {value, Child} when Child#child.pid =:= undefined -> + NState = remove_child(Child, State), + {reply, ok, NState}; + {value, _} -> + {reply, {error, running}, State}; + _ -> + {reply, {error, not_found}, State} + end; + +handle_call({terminate_child, Name}, _From, State) -> + case get_child(Name, State) of + {value, Child} -> + NChild = do_terminate(Child, State#state.name), + {reply, ok, replace_child(NChild, State)}; + _ -> + {reply, {error, not_found}, State} + end; + +handle_call(which_children, _From, State) when ?is_simple(State) -> + [#child{child_type = CT, modules = Mods}] = State#state.children, + Reply = lists:map(fun ({Pid, _}) -> {undefined, Pid, CT, Mods} end, + ?DICT:to_list(State#state.dynamics)), + {reply, Reply, State}; + +handle_call(which_children, _From, State) -> + Resp = + lists:map(fun (#child{pid = Pid, name = Name, + child_type = ChildType, modules = Mods}) -> + {Name, Pid, ChildType, Mods} + end, + State#state.children), + {reply, Resp, State}. + + +handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) + when ?is_simple(State) -> + {ok, NState} = do_restart(RestartType, Reason, Child, State), + {noreply, NState}; +handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> + case get_child(Child#child.name, State) of + {value, Child1} -> + {ok, NState} = do_restart(RestartType, Reason, Child1, State), + {noreply, NState}; + _ -> + {noreply, State} + end; + +%%% Hopefully cause a function-clause as there is no API function +%%% that utilizes cast. +handle_cast(null, State) -> + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + []), + + {noreply, State}. + +%% +%% Take care of terminated children. 
+%% +handle_info({'EXIT', Pid, Reason}, State) -> + case restart_child(Pid, Reason, State) of + {ok, State1} -> + {noreply, State1}; + {shutdown, State1} -> + {stop, shutdown, State1} + end; + +handle_info(Msg, State) -> + error_logger:error_msg("Supervisor received unexpected message: ~p~n", + [Msg]), + {noreply, State}. +%% +%% Terminate this server. +%% +terminate(_Reason, State) when ?is_terminate_simple(State) -> + terminate_simple_children( + hd(State#state.children), State#state.dynamics, State#state.name), + ok; +terminate(_Reason, State) -> + terminate_children(State#state.children, State#state.name), + ok. + +%% +%% Change code for the supervisor. +%% Call the new call-back module and fetch the new start specification. +%% Combine the new spec. with the old. If the new start spec. is +%% not valid the code change will not succeed. +%% Use the old Args as argument to Module:init/1. +%% NOTE: This requires that the init function of the call-back module +%% does not have any side effects. +%% +code_change(_, State, _) -> + case (State#state.module):init(State#state.args) of + {ok, {SupFlags, StartSpec}} -> + case catch check_flags(SupFlags) of + ok -> + {Strategy, MaxIntensity, Period} = SupFlags, + update_childspec(State#state{strategy = Strategy, + intensity = MaxIntensity, + period = Period}, + StartSpec); + Error -> + {error, Error} + end; + ignore -> + {ok, State}; + Error -> + Error + end. + +check_flags({Strategy, MaxIntensity, Period}) -> + validStrategy(Strategy), + validIntensity(MaxIntensity), + validPeriod(Period), + ok; +check_flags(What) -> + {bad_flags, What}. + +update_childspec(State, StartSpec) when ?is_simple(State) -> + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; + +update_childspec(State, StartSpec) -> + case check_startspec(StartSpec) of + {ok, Children} -> + OldC = State#state.children, % In reverse start order ! + NewC = update_childspec1(OldC, Children, []), + {ok, State#state{children = NewC}}; + Error -> + {error, Error} + end. + +update_childspec1([Child|OldC], Children, KeepOld) -> + case update_chsp(Child, Children) of + {ok,NewChildren} -> + update_childspec1(OldC, NewChildren, KeepOld); + false -> + update_childspec1(OldC, Children, [Child|KeepOld]) + end; +update_childspec1([], Children, KeepOld) -> + % Return them in (keeped) reverse start order. + lists:reverse(Children ++ KeepOld). + +update_chsp(OldCh, Children) -> + case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name -> + Ch#child{pid = OldCh#child.pid}; + (Ch) -> + Ch + end, + Children) of + Children -> + false; % OldCh not found in new spec. + NewC -> + {ok, NewC} + end. + +%%% --------------------------------------------------- +%%% Start a new child. +%%% --------------------------------------------------- + +handle_start_child(Child, State) -> + case get_child(Child#child.name, State) of + false -> + case do_start_child(State#state.name, Child) of + {ok, Pid} -> + Children = State#state.children, + {{ok, Pid}, + State#state{children = + [Child#child{pid = Pid}|Children]}}; + {ok, Pid, Extra} -> + Children = State#state.children, + {{ok, Pid, Extra}, + State#state{children = + [Child#child{pid = Pid}|Children]}}; + {error, What} -> + {{error, {What, Child}}, State} + end; + {value, OldChild} when OldChild#child.pid =/= undefined -> + {{error, {already_started, OldChild#child.pid}}, State}; + {value, _OldChild} -> + {{error, already_present}, State} + end. 
+ +%%% --------------------------------------------------- +%%% Restart. A process has terminated. +%%% Returns: {ok, #state} | {shutdown, #state} +%%% --------------------------------------------------- + +restart_child(Pid, Reason, State) when ?is_simple(State) -> + case ?DICT:find(Pid, State#state.dynamics) of + {ok, Args} -> + [Child] = State#state.children, + RestartType = Child#child.restart_type, + {M, F, _} = Child#child.mfa, + NChild = Child#child{pid = Pid, mfa = {M, F, Args}}, + do_restart(RestartType, Reason, NChild, State); + error -> + {ok, State} + end; +restart_child(Pid, Reason, State) -> + Children = State#state.children, + case lists:keysearch(Pid, #child.pid, Children) of + {value, Child} -> + RestartType = Child#child.restart_type, + do_restart(RestartType, Reason, Child, State); + _ -> + {ok, State} + end. + +do_restart({RestartType, Delay}, Reason, Child, State) -> + case restart1(Child, State) of + {ok, NState} -> + {ok, NState}; + {terminate, NState} -> + {ok, _TRef} = timer:apply_after( + trunc(Delay*1000), ?MODULE, delayed_restart, + [self(), {{RestartType, Delay}, Reason, Child}]), + {ok, state_del_child(Child, NState)} + end; +do_restart(permanent, Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); +do_restart(intrinsic, normal, Child, State) -> + {shutdown, state_del_child(Child, State)}; +do_restart(intrinsic, shutdown, Child = #child{child_type = supervisor}, + State) -> + {shutdown, state_del_child(Child, State)}; +do_restart(_, normal, Child, State) -> + NState = state_del_child(Child, State), + {ok, NState}; +do_restart(_, shutdown, Child, State) -> + NState = state_del_child(Child, State), + {ok, NState}; +do_restart(Type, Reason, Child, State) when Type =:= transient orelse + Type =:= intrinsic -> + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); +do_restart(temporary, Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name), + NState = state_del_child(Child, State), + {ok, NState}. + +restart(Child, State) -> + case add_restart(State) of + {ok, NState} -> + restart(NState#state.strategy, Child, NState, fun restart/2); + {terminate, NState} -> + report_error(shutdown, reached_max_restart_intensity, + Child, State#state.name), + {shutdown, state_del_child(Child, NState)} + end. + +restart1(Child, State) -> + case add_restart(State) of + {ok, NState} -> + restart(NState#state.strategy, Child, NState, fun restart1/2); + {terminate, _NState} -> + %% we've reached the max restart intensity, but the + %% add_restart will have added to the restarts + %% field. Given we don't want to die here, we need to go + %% back to the old restarts field otherwise we'll never + %% attempt to restart later. + {terminate, State} + end. 
+ +restart(Strategy, Child, State, Restart) + when Strategy =:= simple_one_for_one orelse + Strategy =:= simple_one_for_one_terminate -> + #child{mfa = {M, F, A}} = Child, + Dynamics = ?DICT:erase(Child#child.pid, State#state.dynamics), + case do_start_child_i(M, F, A) of + {ok, Pid} -> + NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)}, + {ok, NState}; + {ok, Pid, _Extra} -> + NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)}, + {ok, NState}; + {error, Error} -> + report_error(start_error, Error, Child, State#state.name), + Restart(Child, State) + end; +restart(one_for_one, Child, State, Restart) -> + case do_start_child(State#state.name, Child) of + {ok, Pid} -> + NState = replace_child(Child#child{pid = Pid}, State), + {ok, NState}; + {ok, Pid, _Extra} -> + NState = replace_child(Child#child{pid = Pid}, State), + {ok, NState}; + {error, Reason} -> + report_error(start_error, Reason, Child, State#state.name), + Restart(Child, State) + end; +restart(rest_for_one, Child, State, Restart) -> + {ChAfter, ChBefore} = split_child(Child#child.pid, State#state.children), + ChAfter2 = terminate_children(ChAfter, State#state.name), + case start_children(ChAfter2, State#state.name) of + {ok, ChAfter3} -> + {ok, State#state{children = ChAfter3 ++ ChBefore}}; + {error, ChAfter3} -> + Restart(Child, State#state{children = ChAfter3 ++ ChBefore}) + end; +restart(one_for_all, Child, State, Restart) -> + Children1 = del_child(Child#child.pid, State#state.children), + Children2 = terminate_children(Children1, State#state.name), + case start_children(Children2, State#state.name) of + {ok, NChs} -> + {ok, State#state{children = NChs}}; + {error, NChs} -> + Restart(Child, State#state{children = NChs}) + end. + +%%----------------------------------------------------------------- +%% Func: terminate_children/2 +%% Args: Children = [#child] in termination order +%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} +%% Returns: NChildren = [#child] in +%% startup order (reversed termination order) +%%----------------------------------------------------------------- +terminate_children(Children, SupName) -> + terminate_children(Children, SupName, []). + +terminate_children([Child | Children], SupName, Res) -> + NChild = do_terminate(Child, SupName), + terminate_children(Children, SupName, [NChild | Res]); +terminate_children([], _SupName, Res) -> + Res. + +terminate_simple_children(Child, Dynamics, SupName) -> + dict:fold(fun (Pid, _Args, _Any) -> + do_terminate(Child#child{pid = Pid}, SupName) + end, ok, Dynamics), + ok. + +do_terminate(Child, SupName) when Child#child.pid =/= undefined -> + ReportError = fun (Reason) -> + report_error(shutdown_error, Reason, Child, SupName) + end, + case shutdown(Child#child.pid, Child#child.shutdown) of + ok -> + ok; + {error, normal} -> + case Child#child.restart_type of + permanent -> ReportError(normal); + {permanent, _Delay} -> ReportError(normal); + _ -> ok + end; + {error, OtherReason} -> + ReportError(OtherReason) + end, + Child#child{pid = undefined}; +do_terminate(Child, _SupName) -> + Child. + +%%----------------------------------------------------------------- +%% Shutdowns a child. We must check the EXIT value +%% of the child, because it might have died with another reason than +%% the wanted. In that case we want to report the error. 
We put a +%% monitor on the child an check for the 'DOWN' message instead of +%% checking for the 'EXIT' message, because if we check the 'EXIT' +%% message a "naughty" child, who does unlink(Sup), could hang the +%% supervisor. +%% Returns: ok | {error, OtherReason} (this should be reported) +%%----------------------------------------------------------------- +shutdown(Pid, brutal_kill) -> + + case monitor_child(Pid) of + ok -> + exit(Pid, kill), + receive + {'DOWN', _MRef, process, Pid, killed} -> + ok; + {'DOWN', _MRef, process, Pid, OtherReason} -> + {error, OtherReason} + end; + {error, Reason} -> + {error, Reason} + end; + +shutdown(Pid, Time) -> + + case monitor_child(Pid) of + ok -> + exit(Pid, shutdown), %% Try to shutdown gracefully + receive + {'DOWN', _MRef, process, Pid, shutdown} -> + ok; + {'DOWN', _MRef, process, Pid, OtherReason} -> + {error, OtherReason} + after Time -> + exit(Pid, kill), %% Force termination. + receive + {'DOWN', _MRef, process, Pid, OtherReason} -> + {error, OtherReason} + end + end; + {error, Reason} -> + {error, Reason} + end. + +%% Help function to shutdown/2 switches from link to monitor approach +monitor_child(Pid) -> + + %% Do the monitor operation first so that if the child dies + %% before the monitoring is done causing a 'DOWN'-message with + %% reason noproc, we will get the real reason in the 'EXIT'-message + %% unless a naughty child has already done unlink... + erlang:monitor(process, Pid), + unlink(Pid), + + receive + %% If the child dies before the unlik we must empty + %% the mail-box of the 'EXIT'-message and the 'DOWN'-message. + {'EXIT', Pid, Reason} -> + receive + {'DOWN', _, process, Pid, _} -> + {error, Reason} + end + after 0 -> + %% If a naughty child did unlink and the child dies before + %% monitor the result will be that shutdown/2 receives a + %% 'DOWN'-message with reason noproc. + %% If the child should die after the unlink there + %% will be a 'DOWN'-message with a correct reason + %% that will be handled in shutdown/2. + ok + end. + + +%%----------------------------------------------------------------- +%% Child/State manipulating functions. +%%----------------------------------------------------------------- +state_del_child(#child{pid = Pid}, State) when ?is_simple(State) -> + NDynamics = ?DICT:erase(Pid, State#state.dynamics), + State#state{dynamics = NDynamics}; +state_del_child(Child, State) -> + NChildren = del_child(Child#child.name, State#state.children), + State#state{children = NChildren}. + +del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name -> + [Ch#child{pid = undefined} | Chs]; +del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid -> + [Ch#child{pid = undefined} | Chs]; +del_child(Name, [Ch|Chs]) -> + [Ch|del_child(Name, Chs)]; +del_child(_, []) -> + []. + +%% Chs = [S4, S3, Ch, S1, S0] +%% Ret: {[S4, S3, Ch], [S1, S0]} +split_child(Name, Chs) -> + split_child(Name, Chs, []). + +split_child(Name, [Ch|Chs], After) when Ch#child.name =:= Name -> + {lists:reverse([Ch#child{pid = undefined} | After]), Chs}; +split_child(Pid, [Ch|Chs], After) when Ch#child.pid =:= Pid -> + {lists:reverse([Ch#child{pid = undefined} | After]), Chs}; +split_child(Name, [Ch|Chs], After) -> + split_child(Name, Chs, [Ch | After]); +split_child(_, [], After) -> + {lists:reverse(After), []}. + +get_child(Name, State) -> + lists:keysearch(Name, #child.name, State#state.children). +replace_child(Child, State) -> + Chs = do_replace_child(Child, State#state.children), + State#state{children = Chs}. 
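shutdown/2 and monitor_child/1 above implement the usual trick of monitoring the child rather than trusting its link, so a child that has called unlink/1 on the supervisor cannot leave it waiting forever. A stripped-down sketch of the same idea outside a supervisor (the function name and timeout are invented, and the 'EXIT' flushing done by monitor_child/1 is omitted for brevity):

    %% Minimal sketch: ask Pid to stop, escalate to kill after Timeout ms.
    stop_child(Pid, Timeout) ->
        MRef = erlang:monitor(process, Pid),
        unlink(Pid),
        exit(Pid, shutdown),
        receive
            {'DOWN', MRef, process, Pid, shutdown} -> ok;
            {'DOWN', MRef, process, Pid, Reason}   -> {error, Reason}
        after Timeout ->
                exit(Pid, kill),
                receive
                    {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
                end
        end.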
+ +do_replace_child(Child, [Ch|Chs]) when Ch#child.name =:= Child#child.name -> + [Child | Chs]; +do_replace_child(Child, [Ch|Chs]) -> + [Ch|do_replace_child(Child, Chs)]. + +remove_child(Child, State) -> + Chs = lists:keydelete(Child#child.name, #child.name, State#state.children), + State#state{children = Chs}. + +%%----------------------------------------------------------------- +%% Func: init_state/4 +%% Args: SupName = {local, atom()} | {global, atom()} | self +%% Type = {Strategy, MaxIntensity, Period} +%% Strategy = one_for_one | one_for_all | simple_one_for_one | +%% rest_for_one +%% MaxIntensity = integer() +%% Period = integer() +%% Mod :== atom() +%% Arsg :== term() +%% Purpose: Check that Type is of correct type (!) +%% Returns: {ok, #state} | Error +%%----------------------------------------------------------------- +init_state(SupName, Type, Mod, Args) -> + case catch init_state1(SupName, Type, Mod, Args) of + {ok, State} -> + {ok, State}; + Error -> + Error + end. + +init_state1(SupName, {Strategy, MaxIntensity, Period}, Mod, Args) -> + validStrategy(Strategy), + validIntensity(MaxIntensity), + validPeriod(Period), + {ok, #state{name = supname(SupName,Mod), + strategy = Strategy, + intensity = MaxIntensity, + period = Period, + module = Mod, + args = Args}}; +init_state1(_SupName, Type, _, _) -> + {invalid_type, Type}. + +validStrategy(simple_one_for_one_terminate) -> true; +validStrategy(simple_one_for_one) -> true; +validStrategy(one_for_one) -> true; +validStrategy(one_for_all) -> true; +validStrategy(rest_for_one) -> true; +validStrategy(What) -> throw({invalid_strategy, What}). + +validIntensity(Max) when is_integer(Max), + Max >= 0 -> true; +validIntensity(What) -> throw({invalid_intensity, What}). + +validPeriod(Period) when is_integer(Period), + Period > 0 -> true; +validPeriod(What) -> throw({invalid_period, What}). + +supname(self,Mod) -> {self(),Mod}; +supname(N,_) -> N. + +%%% ------------------------------------------------------ +%%% Check that the children start specification is valid. +%%% Shall be a six (6) tuple +%%% {Name, Func, RestartType, Shutdown, ChildType, Modules} +%%% where Name is an atom +%%% Func is {Mod, Fun, Args} == {atom, atom, list} +%%% RestartType is permanent | temporary | transient | +%%% intrinsic | {permanent, Delay} | +%%% {transient, Delay} where Delay >= 0 +%%% Shutdown = integer() | infinity | brutal_kill +%%% ChildType = supervisor | worker +%%% Modules = [atom()] | dynamic +%%% Returns: {ok, [#child]} | Error +%%% ------------------------------------------------------ + +check_startspec(Children) -> check_startspec(Children, []). + +check_startspec([ChildSpec|T], Res) -> + case check_childspec(ChildSpec) of + {ok, Child} -> + case lists:keymember(Child#child.name, #child.name, Res) of + true -> {duplicate_child_name, Child#child.name}; + false -> check_startspec(T, [Child | Res]) + end; + Error -> Error + end; +check_startspec([], Res) -> + {ok, lists:reverse(Res)}. + +check_childspec({Name, Func, RestartType, Shutdown, ChildType, Mods}) -> + catch check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods); +check_childspec(X) -> {invalid_child_spec, X}. + +check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods) -> + validName(Name), + validFunc(Func), + validRestartType(RestartType), + validChildType(ChildType), + validShutdown(Shutdown, ChildType), + validMods(Mods), + {ok, #child{name = Name, mfa = Func, restart_type = RestartType, + shutdown = Shutdown, child_type = ChildType, modules = Mods}}. 
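check_startspec/1 and check_childspec/6 above accept the usual six-element child spec, extended with the {permanent, Delay} and {transient, Delay} restart types. Two illustrative specs, assuming a hypothetical my_child module:

    %% Passes validation: {permanent, 5} satisfies validRestartType/1 via
    %% validDelay/1, and brutal_kill is an accepted Shutdown value.
    {my_child, {my_child, start_link, []},
     {permanent, 5}, brutal_kill, worker, [my_child]}

    %% Rejected: validRestartType/1 throws {invalid_restart_type, sometimes},
    %% which the catch in check_childspec/1 turns into the returned error.
    {my_child, {my_child, start_link, []},
     sometimes, 5000, worker, [my_child]}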
+ +validChildType(supervisor) -> true; +validChildType(worker) -> true; +validChildType(What) -> throw({invalid_child_type, What}). + +validName(_Name) -> true. + +validFunc({M, F, A}) when is_atom(M), + is_atom(F), + is_list(A) -> true; +validFunc(Func) -> throw({invalid_mfa, Func}). + +validRestartType(permanent) -> true; +validRestartType(temporary) -> true; +validRestartType(transient) -> true; +validRestartType(intrinsic) -> true; +validRestartType({permanent, Delay}) -> validDelay(Delay); +validRestartType({transient, Delay}) -> validDelay(Delay); +validRestartType(RestartType) -> throw({invalid_restart_type, + RestartType}). + +validDelay(Delay) when is_number(Delay), + Delay >= 0 -> true; +validDelay(What) -> throw({invalid_delay, What}). + +validShutdown(Shutdown, _) + when is_integer(Shutdown), Shutdown > 0 -> true; +validShutdown(infinity, supervisor) -> true; +validShutdown(brutal_kill, _) -> true; +validShutdown(Shutdown, _) -> throw({invalid_shutdown, Shutdown}). + +validMods(dynamic) -> true; +validMods(Mods) when is_list(Mods) -> + lists:foreach(fun (Mod) -> + if + is_atom(Mod) -> ok; + true -> throw({invalid_module, Mod}) + end + end, + Mods); +validMods(Mods) -> throw({invalid_modules, Mods}). + +%%% ------------------------------------------------------ +%%% Add a new restart and calculate if the max restart +%%% intensity has been reached (in that case the supervisor +%%% shall terminate). +%%% All restarts accured inside the period amount of seconds +%%% are kept in the #state.restarts list. +%%% Returns: {ok, State'} | {terminate, State'} +%%% ------------------------------------------------------ + +add_restart(State) -> + I = State#state.intensity, + P = State#state.period, + R = State#state.restarts, + Now = erlang:now(), + R1 = add_restart([Now|R], Now, P), + State1 = State#state{restarts = R1}, + case length(R1) of + CurI when CurI =< I -> + {ok, State1}; + _ -> + {terminate, State1} + end. + +add_restart([R|Restarts], Now, Period) -> + case inPeriod(R, Now, Period) of + true -> + [R|add_restart(Restarts, Now, Period)]; + _ -> + [] + end; +add_restart([], _, _) -> + []. + +inPeriod(Time, Now, Period) -> + case difference(Time, Now) of + T when T > Period -> + false; + _ -> + true + end. + +%% +%% Time = {MegaSecs, Secs, MicroSecs} (NOTE: MicroSecs is ignored) +%% Calculate the time elapsed in seconds between two timestamps. +%% If MegaSecs is equal just subtract Secs. +%% Else calculate the Mega difference and add the Secs difference, +%% note that Secs difference can be negative, e.g. +%% {827, 999999, 676} diff {828, 1, 653753} == > 2 secs. +%% +difference({TimeM, TimeS, _}, {CurM, CurS, _}) when CurM > TimeM -> + ((CurM - TimeM) * 1000000) + (CurS - TimeS); +difference({_, TimeS, _}, {_, CurS, _}) -> + CurS - TimeS. + +%%% ------------------------------------------------------ +%%% Error and progress reporting. +%%% ------------------------------------------------------ + +report_error(Error, Reason, Child, SupName) -> + ErrorMsg = [{supervisor, SupName}, + {errorContext, Error}, + {reason, Reason}, + {offender, extract_child(Child)}], + error_logger:error_report(supervisor_report, ErrorMsg). + + +extract_child(Child) -> + [{pid, Child#child.pid}, + {name, Child#child.name}, + {mfa, Child#child.mfa}, + {restart_type, Child#child.restart_type}, + {shutdown, Child#child.shutdown}, + {child_type, Child#child.child_type}]. 
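add_restart/1 above timestamps each restart, drops entries older than Period seconds, and reports terminate once the window holds more than MaxIntensity entries. A worked example with intensity 1 and period 1 (the values used by the test module further down in this diff):

    %% t=0.0s  child exits -> restarts = [T0]      length 1 =< 1, restart it
    %% t=0.2s  child exits -> restarts = [T1, T0]  length 2  > 1, terminate
    %%
    %% With a plain transient or permanent child the supervisor now shuts
    %% down (reached_max_restart_intensity); with {transient, 1} it instead
    %% keeps the old restarts list and retries one second later via
    %% restart1/2 and the delayed-restart path shown earlier.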
+ +report_progress(Child, SupName) -> + Progress = [{supervisor, SupName}, + {started, extract_child(Child)}], + error_logger:info_report(progress, Progress). diff -Nru rabbitmq-server-1.7.2/src/tcp_acceptor.erl rabbitmq-server-2.3.1/src/tcp_acceptor.erl --- rabbitmq-server-1.7.2/src/tcp_acceptor.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/tcp_acceptor.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(tcp_acceptor). @@ -55,6 +40,7 @@ {noreply, State}. handle_cast(accept, State) -> + ok = file_handle_cache:obtain(), accept(State); handle_cast(_Msg, State) -> @@ -73,10 +59,18 @@ {Address, Port} = inet_op(fun () -> inet:sockname(LSock) end), {PeerAddress, PeerPort} = inet_op(fun () -> inet:peername(Sock) end), error_logger:info_msg("accepted TCP connection on ~s:~p from ~s:~p~n", - [inet_parse:ntoa(Address), Port, - inet_parse:ntoa(PeerAddress), PeerPort]), + [rabbit_misc:ntoab(Address), Port, + rabbit_misc:ntoab(PeerAddress), PeerPort]), + %% In the event that somebody floods us with connections we can spew + %% the above message at error_logger faster than it can keep up. + %% So error_logger's mailbox grows unbounded until we eat all the + %% memory available and crash. So here's a meaningless synchronous call + %% to the underlying gen_event mechanism - when it returns the mailbox + %% is drained. 
+ gen_event:which_handlers(error_logger), %% handle - apply(M, F, A ++ [Sock]) + file_handle_cache:transfer(apply(M, F, A ++ [Sock])), + ok = file_handle_cache:obtain() catch {inet_error, Reason} -> gen_tcp:close(Sock), error_logger:error_msg("unable to accept TCP connection: ~p~n", @@ -85,11 +79,13 @@ %% accept more accept(State); + handle_info({inet_async, LSock, Ref, {error, closed}}, State=#state{sock=LSock, ref=Ref}) -> %% It would be wrong to attempt to restart the acceptor when we %% know this will fail. {stop, normal, State}; + handle_info(_Info, State) -> {noreply, State}. diff -Nru rabbitmq-server-1.7.2/src/tcp_acceptor_sup.erl rabbitmq-server-2.3.1/src/tcp_acceptor_sup.erl --- rabbitmq-server-1.7.2/src/tcp_acceptor_sup.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/tcp_acceptor_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(tcp_acceptor_sup). diff -Nru rabbitmq-server-1.7.2/src/tcp_client_sup.erl rabbitmq-server-2.3.1/src/tcp_client_sup.erl --- rabbitmq-server-1.7.2/src/tcp_client_sup.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/tcp_client_sup.erl 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(tcp_client_sup). - --behaviour(supervisor). - --export([start_link/1, start_link/2]). - --export([init/1]). - -start_link(Callback) -> - supervisor:start_link(?MODULE, Callback). - -start_link(SupName, Callback) -> - supervisor:start_link(SupName, ?MODULE, Callback). - -init({M,F,A}) -> - {ok, {{simple_one_for_one, 10, 10}, - [{tcp_client, {M,F,A}, - temporary, brutal_kill, worker, [M]}]}}. diff -Nru rabbitmq-server-1.7.2/src/tcp_listener.erl rabbitmq-server-2.3.1/src/tcp_listener.erl --- rabbitmq-server-1.7.2/src/tcp_listener.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/tcp_listener.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. 
Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(tcp_listener). @@ -65,8 +50,9 @@ end, lists:duplicate(ConcurrentAcceptorCount, dummy)), {ok, {LIPAddress, LPort}} = inet:sockname(LSock), - error_logger:info_msg("started ~s on ~s:~p~n", - [Label, inet_parse:ntoa(LIPAddress), LPort]), + error_logger:info_msg( + "started ~s on ~s:~p~n", + [Label, rabbit_misc:ntoab(LIPAddress), LPort]), apply(M, F, A ++ [IPAddress, Port]), {ok, #state{sock = LSock, on_startup = OnStartup, on_shutdown = OnShutdown, @@ -74,7 +60,7 @@ {error, Reason} -> error_logger:error_msg( "failed to start ~s on ~s:~p - ~p~n", - [Label, inet_parse:ntoa(IPAddress), Port, Reason]), + [Label, rabbit_misc:ntoab(IPAddress), Port, Reason]), {stop, {cannot_listen, IPAddress, Port, Reason}} end. @@ -91,7 +77,7 @@ {ok, {IPAddress, Port}} = inet:sockname(LSock), gen_tcp:close(LSock), error_logger:info_msg("stopped ~s on ~s:~p~n", - [Label, inet_parse:ntoa(IPAddress), Port]), + [Label, rabbit_misc:ntoab(IPAddress), Port]), apply(M, F, A ++ [IPAddress, Port]). code_change(_OldVsn, State, _Extra) -> diff -Nru rabbitmq-server-1.7.2/src/tcp_listener_sup.erl rabbitmq-server-2.3.1/src/tcp_listener_sup.erl --- rabbitmq-server-1.7.2/src/tcp_listener_sup.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/tcp_listener_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% The Original Code is RabbitMQ. %% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. 
+%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(tcp_listener_sup). @@ -63,4 +48,4 @@ [IPAddress, Port, SocketOpts, ConcurrentAcceptorCount, Name, OnStartup, OnShutdown, Label]}, - transient, 100, worker, [tcp_listener]}]}}. + transient, 16#ffffffff, worker, [tcp_listener]}]}}. diff -Nru rabbitmq-server-1.7.2/src/test_sup.erl rabbitmq-server-2.3.1/src/test_sup.erl --- rabbitmq-server-1.7.2/src/test_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/test_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,81 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(test_sup). + +-behaviour(supervisor2). + +-export([test_supervisor_delayed_restart/0, + init/1, start_child/0]). + +test_supervisor_delayed_restart() -> + passed = with_sup(simple_one_for_one_terminate, + fun (SupPid) -> + {ok, _ChildPid} = + supervisor2:start_child(SupPid, []), + test_supervisor_delayed_restart(SupPid) + end), + passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1). + +test_supervisor_delayed_restart(SupPid) -> + ok = ping_child(SupPid), + ok = exit_child(SupPid), + timer:sleep(10), + ok = ping_child(SupPid), + ok = exit_child(SupPid), + timer:sleep(10), + timeout = ping_child(SupPid), + timer:sleep(1010), + ok = ping_child(SupPid), + passed. + +with_sup(RestartStrategy, Fun) -> + {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), + Res = Fun(SupPid), + exit(SupPid, shutdown), + rabbit_misc:unlink_and_capture_exit(SupPid), + Res. + +init([RestartStrategy]) -> + {ok, {{RestartStrategy, 1, 1}, + [{test, {test_sup, start_child, []}, {permanent, 1}, + 16#ffffffff, worker, [test_sup]}]}}. + +start_child() -> + {ok, proc_lib:spawn_link(fun run_child/0)}. + +ping_child(SupPid) -> + Ref = make_ref(), + with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end), + receive {pong, Ref} -> ok + after 1000 -> timeout + end. + +exit_child(SupPid) -> + with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end), + ok. + +with_child_pid(SupPid, Fun) -> + case supervisor2:which_children(SupPid) of + [{_Id, undefined, worker, [test_sup]}] -> ok; + [{_Id, ChildPid, worker, [test_sup]}] -> Fun(ChildPid); + [] -> ok + end. + +run_child() -> + receive {ping, Ref, Pid} -> Pid ! {pong, Ref}, + run_child() + end. diff -Nru rabbitmq-server-1.7.2/src/vm_memory_monitor.erl rabbitmq-server-2.3.1/src/vm_memory_monitor.erl --- rabbitmq-server-1.7.2/src/vm_memory_monitor.erl 2010-02-15 16:01:03.000000000 +0000 +++ rabbitmq-server-2.3.1/src/vm_memory_monitor.erl 2011-02-03 12:47:35.000000000 +0000 @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% %% In practice Erlang shouldn't be allowed to grow to more than a half @@ -47,7 +32,7 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --export([update/0, get_total_memory/0, +-export([update/0, get_total_memory/0, get_vm_limit/0, get_check_interval/0, set_check_interval/1, get_vm_memory_high_watermark/0, set_vm_memory_high_watermark/1, get_memory_limit/0]). @@ -72,12 +57,11 @@ -ifdef(use_specs). --spec(start_link/1 :: (float()) -> - ('ignore' | {'error', any()} | {'ok', pid()})). +-spec(start_link/1 :: (float()) -> {'ok', pid()} | {'error', any()}). -spec(update/0 :: () -> 'ok'). -spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')). --spec(get_vm_limit/0 :: () -> (non_neg_integer() | 'unknown')). --spec(get_memory_limit/0 :: () -> (non_neg_integer() | 'undefined')). +-spec(get_vm_limit/0 :: () -> non_neg_integer()). +-spec(get_memory_limit/0 :: () -> non_neg_integer()). -spec(get_check_interval/0 :: () -> non_neg_integer()). -spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok'). -spec(get_vm_memory_high_watermark/0 :: () -> float()). @@ -85,7 +69,6 @@ -endif. - %%---------------------------------------------------------------------------- %% Public API %%---------------------------------------------------------------------------- @@ -297,6 +280,12 @@ Dict = dict:from_list(lists:map(fun parse_line_sunos/1, Lines)), dict:fetch('Memory size', Dict); +get_total_memory({unix, aix}) -> + File = cmd("/usr/bin/vmstat -v"), + Lines = string:tokens(File, "\n"), + Dict = dict:from_list(lists:map(fun parse_line_aix/1, Lines)), + dict:fetch('memory pages', Dict) * 4096; + get_total_memory(_OsType) -> unknown. 
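The new {unix, aix} clause reads /usr/bin/vmstat -v, whose lines put a value first and a name after it, and multiplies the 'memory pages' figure by the 4096-byte page size. A worked example with invented output, using the parse_line_aix/1 helper added in the next hunk:

    %% "  1048576 memory pages"      -> {'memory pages', 1048576}
    %% get_total_memory({unix, aix}) -> 1048576 * 4096 = 4294967296 bytes (4 GiB)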
@@ -342,6 +331,17 @@ [Name] -> {list_to_atom(Name), none} end. +%% Lines look like " 12345 memory pages" +%% or " 80.1 maxpin percentage" +parse_line_aix(Line) -> + [Value | NameWords] = string:tokens(Line, " "), + Name = string:join(NameWords, " "), + {list_to_atom(Name), + case lists:member($., Value) of + true -> trunc(list_to_float(Value)); + false -> list_to_integer(Value) + end}. + freebsd_sysctl(Def) -> list_to_integer(cmd("/sbin/sysctl -n " ++ Def) -- "\n"). diff -Nru rabbitmq-server-1.7.2/src/worker_pool.erl rabbitmq-server-2.3.1/src/worker_pool.erl --- rabbitmq-server-1.7.2/src/worker_pool.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/worker_pool.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,140 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(worker_pool). + +%% Generic worker pool manager. +%% +%% Supports nested submission of jobs (nested jobs always run +%% immediately in current worker process). +%% +%% Possible future enhancements: +%% +%% 1. Allow priorities (basically, change the pending queue to a +%% priority_queue). + +-behaviour(gen_server2). + +-export([start_link/0, submit/1, submit_async/1, idle/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). +-spec(submit/1 :: (fun (() -> A) | {atom(), atom(), [any()]}) -> A). +-spec(submit_async/1 :: + (fun (() -> any()) | {atom(), atom(), [any()]}) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(SERVER, ?MODULE). +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). + +-record(state, { available, pending }). + +%%---------------------------------------------------------------------------- + +start_link() -> + gen_server2:start_link({local, ?SERVER}, ?MODULE, [], + [{timeout, infinity}]). + +submit(Fun) -> + case get(worker_pool_worker) of + true -> worker_pool_worker:run(Fun); + _ -> Pid = gen_server2:call(?SERVER, next_free, infinity), + worker_pool_worker:submit(Pid, Fun) + end. + +submit_async(Fun) -> + gen_server2:cast(?SERVER, {run_async, Fun}). + +idle(WId) -> + gen_server2:cast(?SERVER, {idle, WId}). + +%%---------------------------------------------------------------------------- + +init([]) -> + {ok, #state { pending = queue:new(), available = queue:new() }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
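worker_pool's public API is small: submit/1 blocks until a pool worker has run the fun (or MFA) and returns its result, while submit_async/1 queues the job and returns immediately. A hedged usage sketch with illustrative values:

    9       = worker_pool:submit(fun () -> 4 + 5 end),
    [1,2,3] = worker_pool:submit({lists, reverse, [[3,2,1]]}),
    ok      = worker_pool:submit_async(
                fun () -> error_logger:info_msg("job done~n") end).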
+ +handle_call(next_free, From, State = #state { available = Avail, + pending = Pending }) -> + case queue:out(Avail) of + {empty, _Avail} -> + {noreply, + State #state { pending = queue:in({next_free, From}, Pending) }, + hibernate}; + {{value, WId}, Avail1} -> + {reply, get_worker_pid(WId), State #state { available = Avail1 }, + hibernate} + end; + +handle_call(Msg, _From, State) -> + {stop, {unexpected_call, Msg}, State}. + +handle_cast({idle, WId}, State = #state { available = Avail, + pending = Pending }) -> + {noreply, case queue:out(Pending) of + {empty, _Pending} -> + State #state { available = queue:in(WId, Avail) }; + {{value, {next_free, From}}, Pending1} -> + gen_server2:reply(From, get_worker_pid(WId)), + State #state { pending = Pending1 }; + {{value, {run_async, Fun}}, Pending1} -> + worker_pool_worker:submit_async(get_worker_pid(WId), Fun), + State #state { pending = Pending1 } + end, hibernate}; + +handle_cast({run_async, Fun}, State = #state { available = Avail, + pending = Pending }) -> + {noreply, + case queue:out(Avail) of + {empty, _Avail} -> + State #state { pending = queue:in({run_async, Fun}, Pending)}; + {{value, WId}, Avail1} -> + worker_pool_worker:submit_async(get_worker_pid(WId), Fun), + State #state { available = Avail1 } + end, hibernate}; + +handle_cast(Msg, State) -> + {stop, {unexpected_cast, Msg}, State}. + +handle_info(Msg, State) -> + {stop, {unexpected_info, Msg}, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +terminate(_Reason, State) -> + State. + +%%---------------------------------------------------------------------------- + +get_worker_pid(WId) -> + [{WId, Pid, _Type, _Modules} | _] = + lists:dropwhile(fun ({Id, _Pid, _Type, _Modules}) + when Id =:= WId -> false; + (_) -> true + end, + supervisor:which_children(worker_pool_sup)), + Pid. diff -Nru rabbitmq-server-1.7.2/src/worker_pool_sup.erl rabbitmq-server-2.3.1/src/worker_pool_sup.erl --- rabbitmq-server-1.7.2/src/worker_pool_sup.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/worker_pool_sup.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,53 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(worker_pool_sup). + +-behaviour(supervisor). + +-export([start_link/0, start_link/1]). + +-export([init/1]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). +-spec(start_link/1 :: (non_neg_integer()) -> {'ok', pid()} | {'error', any()}). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(SERVER, ?MODULE). + +%%---------------------------------------------------------------------------- + +start_link() -> + start_link(erlang:system_info(schedulers)). + +start_link(WCount) -> + supervisor:start_link({local, ?SERVER}, ?MODULE, [WCount]). 
+ +%%---------------------------------------------------------------------------- + +init([WCount]) -> + {ok, {{one_for_one, 10, 10}, + [{worker_pool, {worker_pool, start_link, []}, transient, + 16#ffffffff, worker, [worker_pool]} | + [{N, {worker_pool_worker, start_link, [N]}, transient, 16#ffffffff, + worker, [worker_pool_worker]} || N <- lists:seq(1, WCount)]]}}. diff -Nru rabbitmq-server-1.7.2/src/worker_pool_worker.erl rabbitmq-server-2.3.1/src/worker_pool_worker.erl --- rabbitmq-server-1.7.2/src/worker_pool_worker.erl 1970-01-01 00:00:00.000000000 +0000 +++ rabbitmq-server-2.3.1/src/worker_pool_worker.erl 2011-02-03 12:47:35.000000000 +0000 @@ -0,0 +1,106 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(worker_pool_worker). + +-behaviour(gen_server2). + +-export([start_link/1, submit/2, submit_async/2, run/1]). + +-export([set_maximum_since_use/2]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3, prioritise_cast/2]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(start_link/1 :: (any()) -> {'ok', pid()} | {'error', any()}). +-spec(submit/2 :: (pid(), fun (() -> A) | {atom(), atom(), [any()]}) -> A). +-spec(submit_async/2 :: + (pid(), fun (() -> any()) | {atom(), atom(), [any()]}) -> 'ok'). +-spec(run/1 :: (fun (() -> A)) -> A; + ({atom(), atom(), [any()]}) -> any()). +-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). + +-endif. + +%%---------------------------------------------------------------------------- + +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). + +%%---------------------------------------------------------------------------- + +start_link(WId) -> + gen_server2:start_link(?MODULE, [WId], [{timeout, infinity}]). + +submit(Pid, Fun) -> + gen_server2:call(Pid, {submit, Fun}, infinity). + +submit_async(Pid, Fun) -> + gen_server2:cast(Pid, {submit_async, Fun}). + +set_maximum_since_use(Pid, Age) -> + gen_server2:cast(Pid, {set_maximum_since_use, Age}). + +run({M, F, A}) -> + apply(M, F, A); +run(Fun) -> + Fun(). + +%%---------------------------------------------------------------------------- + +init([WId]) -> + ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use, + [self()]), + ok = worker_pool:idle(WId), + put(worker_pool_worker, true), + {ok, WId, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +prioritise_cast({set_maximum_since_use, _Age}, _State) -> 8; +prioritise_cast(_Msg, _State) -> 0. + +handle_call({submit, Fun}, From, WId) -> + gen_server2:reply(From, run(Fun)), + ok = worker_pool:idle(WId), + {noreply, WId, hibernate}; + +handle_call(Msg, _From, State) -> + {stop, {unexpected_call, Msg}, State}. 
+ +handle_cast({submit_async, Fun}, WId) -> + run(Fun), + ok = worker_pool:idle(WId), + {noreply, WId, hibernate}; + +handle_cast({set_maximum_since_use, Age}, WId) -> + ok = file_handle_cache:set_maximum_since_use(Age), + {noreply, WId, hibernate}; + +handle_cast(Msg, State) -> + {stop, {unexpected_cast, Msg}, State}. + +handle_info(Msg, State) -> + {stop, {unexpected_info, Msg}, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +terminate(_Reason, State) -> + State.
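Worth noting from init/1 above: each worker marks itself with put(worker_pool_worker, true), which is what worker_pool:submit/1 checks so that a job submitted from inside a pool worker runs inline instead of waiting for a free worker. A small sketch of such a nested submission (values are illustrative):

    %% The inner submit/1 sees get(worker_pool_worker) =:= true and calls
    %% worker_pool_worker:run/1 directly, so even a single-worker pool
    %% cannot deadlock on nested jobs.
    42 = worker_pool:submit(
           fun () -> worker_pool:submit(fun () -> 6 * 7 end) end).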