diff -Nru pcs-0.9.151/debian/changelog pcs-0.9.153/debian/changelog
--- pcs-0.9.151/debian/changelog 2016-06-20 10:31:07.000000000 +0000
+++ pcs-0.9.153/debian/changelog 2016-07-30 15:20:30.000000000 +0000
@@ -1,3 +1,22 @@
+pcs (0.9.153-2) unstable; urgency=medium
+
+  * Fix package description
+  * Drop unused dependencies: ruby-thor, ruby-tilt
+  * Re-enable the upstream init script
+  * Fix test fail with ruby-json 2.0 (Closes: #832172)
+  * Avoid depending on thread finish order in tests
+
+ -- Valentin Vidic  Sat, 30 Jul 2016 11:28:50 +0200
+
+pcs (0.9.153-1) unstable; urgency=medium
+
+  * New upstream version.
+  * Update upstream homepage.
+  * Update IPv6 bind patch to work with rack.
+  * Drop unused ruby-eventmachine dependency.
+
+ -- Valentin Vidic  Tue, 05 Jul 2016 13:32:45 +0200
+
 pcs (0.9.151-1) unstable; urgency=medium
 
   [ Valentin Vidic ]
diff -Nru pcs-0.9.151/debian/control pcs-0.9.153/debian/control
--- pcs-0.9.151/debian/control 2016-06-20 14:03:43.000000000 +0000
+++ pcs-0.9.153/debian/control 2016-07-30 07:58:34.000000000 +0000
@@ -10,10 +10,9 @@
  dh-systemd,
  libpam0g-dev,
  python-all (>= 2.6.6-3~),
- python-setuptools,
- systemd
+ python-setuptools
 Standards-Version: 3.9.8
-Homepage: https://github.com/feist/pcs
+Homepage: https://github.com/ClusterLabs/pcs
 Vcs-Git: https://alioth.debian.org/anonscm/git/debian-ha/pcs.git
 Vcs-Browser: http://anonscm.debian.org/cgit/debian-ha/pcs.git
 
@@ -30,7 +29,6 @@
  ruby,
  ruby-activesupport,
  ruby-backports,
- ruby-eventmachine,
  ruby-highline,
  ruby-json,
  ruby-multi-json,
@@ -41,8 +39,6 @@
  ruby-rpam-ruby19,
  ruby-sinatra,
  ruby-sinatra-contrib,
- ruby-thor,
- ruby-tilt,
  rubygems-integration
 Recommends: pacemaker (>= 1.1.12)
 Replaces: pacemaker (<< 1.1.12)
@@ -51,7 +47,7 @@
 # conflict with it so it isn't loaded accidentally
 Conflicts: python-pcs
 Description: Pacemaker Configuration System
- pcs is a corosync and pacemaker configuration tool. It permits
+ pcs is a corosync and pacemaker configuration tool. It permits
 users to easily view, modify and create pacemaker based clusters.
 .
 pcs also provides pcsd, which operates as a GUI and remote server
diff -Nru pcs-0.9.151/debian/patches/0007-Fix-IPv6-bind.patch pcs-0.9.153/debian/patches/0007-Fix-IPv6-bind.patch
--- pcs-0.9.151/debian/patches/0007-Fix-IPv6-bind.patch 2016-06-20 10:10:18.000000000 +0000
+++ pcs-0.9.153/debian/patches/0007-Fix-IPv6-bind.patch 2016-07-30 07:58:34.000000000 +0000
@@ -33,7 +33,7 @@
  The problem is caused by the IPV6_V6ONLY socket option being set:
    setsockopt(7, SOL_IPV6, IPV6_V6ONLY, [1], 4) = 0
  .
- On ruby 2.1 using ``nil`` in place of ``::`` has the same effect so
+ On ruby 2.1 using ``*`` in place of ``::`` has the same effect so
  this ruby version check is added to the bind code in pcsd.
Author: Valentin Vidic Bug: https://github.com/ClusterLabs/pcs/issues/51 @@ -47,7 +47,7 @@ end +if primary_addr == '::' and RUBY_VERSION >= '2.1' -+ primary_addr = nil ++ primary_addr = '*' +end + webrick_options = { diff -Nru pcs-0.9.151/debian/patches/0008-Fix-corosync-log.patch pcs-0.9.153/debian/patches/0008-Fix-corosync-log.patch --- pcs-0.9.151/debian/patches/0008-Fix-corosync-log.patch 2016-06-20 14:03:43.000000000 +0000 +++ pcs-0.9.153/debian/patches/0008-Fix-corosync-log.patch 2016-07-30 07:58:34.000000000 +0000 @@ -1,6 +1,6 @@ --- a/pcs/cluster.py +++ b/pcs/cluster.py -@@ -709,7 +709,7 @@ +@@ -726,7 +726,7 @@ quorum_section.add_attribute("two_node", "1") logging_section.add_attribute("to_logfile", "yes") diff -Nru pcs-0.9.151/debian/patches/0009-Fix-testsuite.patch pcs-0.9.153/debian/patches/0009-Fix-testsuite.patch --- pcs-0.9.151/debian/patches/0009-Fix-testsuite.patch 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/debian/patches/0009-Fix-testsuite.patch 2016-07-30 07:58:34.000000000 +0000 @@ -0,0 +1,327 @@ +--- a/pcs/test/test_cluster.py ++++ b/pcs/test/test_cluster.py +@@ -173,7 +173,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """ +@@ -232,7 +232,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -378,7 +378,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -423,7 +423,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -464,7 +464,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -510,7 +510,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -552,7 +552,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -587,7 +587,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -635,7 +635,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -681,7 +681,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -722,7 +722,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -772,7 +772,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -818,7 +818,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1217,7 +1217,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1338,7 +1338,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: 
/var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1397,7 +1397,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1456,7 +1456,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1517,7 +1517,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1583,7 +1583,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1648,7 +1648,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1713,7 +1713,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -1802,7 +1802,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -2381,7 +2381,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +@@ -2620,7 +2620,7 @@ + + logging { + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + } + """) +--- a/pcs/test/test_lib_corosync_config_parser.py ++++ b/pcs/test/test_lib_corosync_config_parser.py +@@ -1020,7 +1020,7 @@ + # Log to a log file. When set to "no", the "logfile" option + # must not be set. + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + # Log to the system log daemon. When in doubt, set to yes. + to_syslog: yes + # Log debug messages (very verbose). When in doubt, leave off. 
+@@ -1060,7 +1060,7 @@ + fileline: off + to_stderr: no + to_logfile: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + to_syslog: yes + debug: off + timestamp: on +@@ -1097,7 +1097,7 @@ + fileline: off + to_logfile: yes + to_syslog: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + debug: off + timestamp: on + logger_subsys { +@@ -1155,7 +1155,7 @@ + fileline: off + to_logfile: yes + to_syslog: yes +- logfile: /var/log/cluster/corosync.log ++ logfile: /var/log/corosync/corosync.log + debug: off + timestamp: on + +--- a/pcs/test/test_stonith.py ++++ b/pcs/test/test_stonith.py +@@ -191,7 +191,7 @@ + + output, returnVal = pcs( + temp_cib, +- "stonith create f4 fence_xvm meta provides=something" ++ "stonith create f4 fence_dummy meta provides=something" + ) + ac(output, "") + self.assertEqual(0, returnVal) +@@ -207,7 +207,7 @@ + Resource: f3 (class=stonith type=fence_scsi) + Meta Attrs: provides=unfencing + Operations: monitor interval=60s (f3-monitor-interval-60s) +- Resource: f4 (class=stonith type=fence_xvm) ++ Resource: f4 (class=stonith type=fence_dummy) + Meta Attrs: provides=something + Operations: monitor interval=60s (f4-monitor-interval-60s) + """) +@@ -240,7 +240,7 @@ + + output, returnVal = pcs( + temp_cib, +- "stonith create f4 fence_xvm meta provides=something" ++ "stonith create f4 fence_dummy meta provides=something" + ) + ac(output, "") + self.assertEqual(0, returnVal) +@@ -259,7 +259,7 @@ + Attributes: key=abc + Meta Attrs: provides=unfencing + Operations: monitor interval=60s (f3-monitor-interval-60s) +- Resource: f4 (class=stonith type=fence_xvm) ++ Resource: f4 (class=stonith type=fence_dummy) + Meta Attrs: provides=something + Operations: monitor interval=60s (f4-monitor-interval-60s) + """) +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -2640,7 +2640,7 @@ + assert output == " Master: D1-master-custom\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n", [output] + + def testLSBResource(self): +- output, returnVal = pcs(temp_cib, "resource create --no-default-ops D2 lsb:network") ++ output, returnVal = pcs(temp_cib, "resource create --no-default-ops D2 lsb:networking") + assert returnVal == 0 + assert output == "", [output] + +--- a/pcsd/test/test_config.rb ++++ b/pcsd/test/test_config.rb +@@ -125,7 +125,7 @@ + assert_equal( + [[ + 'error', +- "Unable to parse pcs_settings file: 399: unexpected token at '\"rh71-node2\"\n ]\n }\n ]\n}'" ++ "Unable to parse pcs_settings file: 409: unexpected token at '\"rh71-node2\"\n ]\n }\n ]\n}'" + ]], + $logger.log + ) +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -1797,7 +1797,7 @@ + wait_seconds=.1 + ) + +- self.assertEqual(log, ['first', 'second']) ++ self.assertEqual(sorted(log), ['first', 'second']) + + def test_wait_for_slower_workers(self): + log = [] diff -Nru pcs-0.9.151/debian/patches/series pcs-0.9.153/debian/patches/series --- pcs-0.9.151/debian/patches/series 2016-06-20 14:03:43.000000000 +0000 +++ pcs-0.9.153/debian/patches/series 2016-07-30 07:58:34.000000000 +0000 @@ -5,3 +5,4 @@ 0006-Replace-orderedhash.patch 0007-Fix-IPv6-bind.patch 0008-Fix-corosync-log.patch +0009-Fix-testsuite.patch diff -Nru 
pcs-0.9.151/debian/pcsd.init pcs-0.9.153/debian/pcsd.init --- pcs-0.9.151/debian/pcsd.init 2015-05-08 09:44:31.000000000 +0000 +++ pcs-0.9.153/debian/pcsd.init 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -#!/bin/sh -## -# pcsd Pacemaker & Corosync configuration daemon -# -# chkconfig: - 21 81 -# description: Pacemaker & Corosync configuration daemon - -### BEGIN INIT INFO -# Provides: pcsd -# Required-Start: $remote_fs $network $syslog -# Required-Stop: $remote_fs $network $syslog -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Starts and stops Pacemaker & Corosync daemon -# Description: Starts and stops Pacemaker & Corosync daemon -### END INIT INFO - -# PATH -PATH=/usr/sbin:/usr/bin:/sbin:/bin -DESC="pcs daemon" -NAME=pcsd -EXEC=ruby -SUB_EXEC=/usr/share/pcsd/ssl.rb -DAEMON_USER=root -DAEMON=/usr/bin/ruby -DAEMON_ARGS="-C/var/lib/pcsd -I/usr/share/pcsd -- /usr/share/pcsd/ssl.rb" -PIDFILE=/var/run/$NAME.pid -SCRIPTNAME=/etc/init.d/$NAME -LOGFILE=/var/log/$NAME/$NAME.log -SLEEP_DURATION=2 - -# Exit if ruby is not installed -[ -x $(which $EXEC) ] || echo "$EXEC was not found. Is it installed?" -[ -x $(which $SUB_EXEC) ] || echo "$SUB_EXEC not found. Is pcs installed?" - -# Read configuration variable file if it is present -[ -r /etc/default/$NAME ] && . /etc/default/$NAME - -# Source lsb init functions -. /lib/lsb/init-functions - -is_running() -{ - # Test whether pid file exists or not - test -f $PIDFILE || return 1 - - # Test whether process is running or not - read PID < "$PIDFILE" - ps -p $PID >/dev/null 2>&1 || return 1 - - # Is running - return 0 -} - -root_only() -{ - if [ "$(id -u)" != "0" ]; then - echo "Only root should run this operation" - exit 1 - fi -} - -run() -{ - if is_running; then - PID="$(cat $PIDFILE)" - echo "Daemon is already running as PID $PID" - return 1 - fi - - nohup $DAEMON $DAEMON_ARGS > /dev/null 2>&1 - echo $! > $PIDFILE - read PID < "$PIDFILE" - - echo "PID is $PID" - - sleep $SLEEP_DURATION - if ! is_running; then - echo "Daemon died immediately after starting. Please check your logs and configurations." - return 1 - fi - - echo "Daemon is running as PID $PID" - return 0 -} - -stop() -{ - if is_running; then - read PID < "$PIDFILE" - kill -9 $PID - fi - - sleep $SLEEP_DURATION - if is_running; then - while is_running; do - echo "waiting for daemon to die (PID $PID)" - sleep $SLEEP_DURATION - done - fi - - # Be sure to remove the pid file - rm -f "$PIDFILE" - return 0 -} - -case "$1" in - start) - root_only - log_daemon_msg "Starting $DESC" "$NAME" - run - log_end_msg $? - ;; - stop) - root_only - log_daemon_msg "Stopping $DESC" "$NAME" - stop - log_end_msg $? - ;; - restart|force-reload) - log_daemon_msg "Restarting $DESC" "$NAME" - root_only - $0 stop && $0 start - ;; - status|monitor) - status_of_proc \ - -p "$PIDFILE" \ - "$SUB_EXEC" \ - "$NAME" \ - && exit 0 \ - || exit $? 
- ;; - *) - echo "Usage: $0 {start|stop|restart|reload|force-reload|status|monitor}" - exit 1 - ;; -esac - -: diff -Nru pcs-0.9.151/debian/pcs.install pcs-0.9.153/debian/pcs.install --- pcs-0.9.151/debian/pcs.install 2016-01-27 09:32:02.000000000 +0000 +++ pcs-0.9.153/debian/pcs.install 2016-07-30 07:58:34.000000000 +0000 @@ -1,5 +1,6 @@ etc/bash_completion.d/* usr/share/bash-completion/completions/ etc/default/* +etc/init.d/* etc/logrotate.d/* etc/pam.d/* lib/systemd/system/* diff -Nru pcs-0.9.151/debian/pcs.lintian-overrides pcs-0.9.153/debian/pcs.lintian-overrides --- pcs-0.9.151/debian/pcs.lintian-overrides 2016-01-27 09:32:02.000000000 +0000 +++ pcs-0.9.153/debian/pcs.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# PCS/PCSD ships with a service file which does not -# need the init script -script-in-etc-init.d-not-registered-via-update-rc.d diff -Nru pcs-0.9.151/debian/rules pcs-0.9.153/debian/rules --- pcs-0.9.151/debian/rules 2016-06-20 14:03:43.000000000 +0000 +++ pcs-0.9.153/debian/rules 2016-07-30 07:58:34.000000000 +0000 @@ -36,6 +36,10 @@ BUILD_GEMS=false \ systemddir=/lib/systemd \ SYSTEMCTL_OVERRIDE=true + + # Always install the init script + install -m 755 -D pcsd/pcsd.debian $(CURDIR)/debian/tmp/etc/init.d/pcsd + # remove embedded fonts set -e && cd $(CURDIR)/debian/tmp/usr/share/pcsd/public/css && \ for ttf in Liberation*.ttf; do \ @@ -47,7 +51,7 @@ dh_python2 -p pcs /usr/share/pcsd override_dh_installinit: - dh_installinit -n --name=pcsd -- + dh_installinit --onlyscripts --name=pcsd override_dh_compress: # make non-scripts non-executable @@ -56,5 +60,6 @@ chmod -x $(CURDIR)/debian/pcs/usr/lib/python*/*-packages/pcs/settings.py chmod -x $(CURDIR)/debian/pcs/usr/share/bash-completion/completions/pcs chmod a+x $(CURDIR)/debian/pcs/usr/share/pcsd/pcsd.debian + chmod a+x $(CURDIR)/debian/pcs/usr/share/pcsd/pcsd.service-runner chmod 755 $(CURDIR)/debian/pcs/usr/share/pcsd/public/css/images/ dh_compress diff -Nru pcs-0.9.151/debian/tests/control pcs-0.9.153/debian/tests/control --- pcs-0.9.151/debian/tests/control 2016-06-20 14:03:43.000000000 +0000 +++ pcs-0.9.153/debian/tests/control 2016-07-30 07:58:34.000000000 +0000 @@ -1,3 +1,3 @@ -Depends: @, pacemaker, pacemaker-cli-utils -Tests: status, setup +Depends: @, pacemaker, pacemaker-cli-utils, make, fence-agents, python-mock +Tests: status, setup, testsuite-pcsd, testsuite-pcs Restrictions: needs-root diff -Nru pcs-0.9.151/debian/tests/setup pcs-0.9.153/debian/tests/setup --- pcs-0.9.151/debian/tests/setup 2016-06-20 14:07:35.000000000 +0000 +++ pcs-0.9.153/debian/tests/setup 2016-07-30 07:58:34.000000000 +0000 @@ -11,7 +11,7 @@ trap "cleanup" 0 2 3 15 -pcs cluster setup --name debian --local --force --start localhost +pcs cluster setup --name debian --local --force --start localhost 2>&1 service pcsd start sleep 60 diff -Nru pcs-0.9.151/debian/tests/testsuite-pcs pcs-0.9.153/debian/tests/testsuite-pcs --- pcs-0.9.151/debian/tests/testsuite-pcs 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/debian/tests/testsuite-pcs 2016-07-30 07:58:34.000000000 +0000 @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +cat >>/etc/hosts <&1 diff -Nru pcs-0.9.151/debian/tests/testsuite-pcsd pcs-0.9.153/debian/tests/testsuite-pcsd --- pcs-0.9.151/debian/tests/testsuite-pcsd 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/debian/tests/testsuite-pcsd 2016-07-30 07:58:34.000000000 +0000 @@ -0,0 +1,5 @@ +#!/bin/sh + +set -e + +make -C /usr/share/pcsd/test test diff -Nru pcs-0.9.151/Makefile pcs-0.9.153/Makefile --- 
pcs-0.9.151/Makefile 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/Makefile 2016-07-01 13:31:39.000000000 +0000 @@ -76,11 +76,16 @@ endif endif + +ifndef BASH_COMPLETION_DIR + BASH_COMPLETION_DIR=${DESTDIR}/etc/bash_completion.d +endif + install: $(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS} mkdir -p ${DESTDIR}${PREFIX}/sbin/ mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs - install -D pcs/bash_completion.sh ${DESTDIR}/etc/bash_completion.d/pcs + install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8 ifeq ($(IS_DEBIAN),true) ifeq ($(install_settings),true) @@ -126,6 +131,9 @@ ifeq ($(IS_SYSTEMCTL),true) install -d ${DESTDIR}/${systemddir}/system/ install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/ +# ${DESTDIR}${PREFIX}/lib/pcsd/pcsd holds the selinux context + install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PREFIX}/lib/pcsd/pcsd + rm ${DESTDIR}${PREFIX}/lib/pcsd/pcsd.service-runner else install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd endif diff -Nru pcs-0.9.151/pcs/alert.py pcs-0.9.153/pcs/alert.py --- pcs-0.9.151/pcs/alert.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/alert.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,237 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import sys + +from pcs import ( + usage, + utils, +) +from pcs.cli.common.errors import CmdLineInputError +from pcs.cli.common.parse_args import prepare_options +from pcs.cli.common.console_report import indent +from pcs.lib.errors import LibraryError + + +def alert_cmd(*args): + argv = args[1] + if not argv: + sub_cmd = "config" + else: + sub_cmd = argv.pop(0) + try: + if sub_cmd == "help": + usage.alert(argv) + elif sub_cmd == "create": + alert_add(*args) + elif sub_cmd == "update": + alert_update(*args) + elif sub_cmd == "remove": + alert_remove(*args) + elif sub_cmd == "config" or sub_cmd == "show": + print_alert_config(*args) + elif sub_cmd == "recipient": + recipient_cmd(*args) + else: + raise CmdLineInputError() + except LibraryError as e: + utils.process_library_reports(e.args) + except CmdLineInputError as e: + utils.exit_on_cmdline_input_errror(e, "alert", sub_cmd) + + +def recipient_cmd(*args): + argv = args[1] + + if not argv: + usage.alert(["recipient"]) + sys.exit(1) + + sub_cmd = argv.pop(0) + try: + if sub_cmd == "help": + usage.alert(["recipient"]) + elif sub_cmd == "add": + recipient_add(*args) + elif sub_cmd == "update": + recipient_update(*args) + elif sub_cmd == "remove": + recipient_remove(*args) + except CmdLineInputError as e: + utils.exit_on_cmdline_input_errror( + e, "alert", "recipient {0}".format(sub_cmd) + ) + + +def parse_cmd_sections(arg_list, section_list): + output = dict([(section, []) for section in section_list + ["main"]]) + cur_section = "main" + for arg in arg_list: + if arg in section_list: + cur_section = arg + continue + output[cur_section].append(arg) + + return output + + +def ensure_only_allowed_options(parameter_dict, allowed_list): + for arg, value in parameter_dict.items(): + if arg not in allowed_list: + raise CmdLineInputError( + "Unexpected parameter '{0}={1}'".format(arg, value) + ) + + +def alert_add(lib, argv, modifiers): + if not argv: + raise CmdLineInputError() + + sections = parse_cmd_sections(argv, ["options", "meta"]) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["id", "description", "path"]) + + 
lib.alert.create_alert( + main_args.get("id", None), + main_args.get("path", None), + prepare_options(sections["options"]), + prepare_options(sections["meta"]), + main_args.get("description", None) + ) + + +def alert_update(lib, argv, modifiers): + if not argv: + raise CmdLineInputError() + + alert_id = argv[0] + + sections = parse_cmd_sections(argv[1:], ["options", "meta"]) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description", "path"]) + + lib.alert.update_alert( + alert_id, + main_args.get("path", None), + prepare_options(sections["options"]), + prepare_options(sections["meta"]), + main_args.get("description", None) + ) + + +def alert_remove(lib, argv, modifiers): + if len(argv) != 1: + raise CmdLineInputError() + + lib.alert.remove_alert(argv[0]) + + +def recipient_add(lib, argv, modifiers): + if len(argv) < 2: + raise CmdLineInputError() + + alert_id = argv[0] + recipient_value = argv[1] + + sections = parse_cmd_sections(argv[2:], ["options", "meta"]) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description"]) + + lib.alert.add_recipient( + alert_id, + recipient_value, + prepare_options(sections["options"]), + prepare_options(sections["meta"]), + main_args.get("description", None) + ) + + +def recipient_update(lib, argv, modifiers): + if len(argv) < 2: + raise CmdLineInputError() + + alert_id = argv[0] + recipient_value = argv[1] + + sections = parse_cmd_sections(argv[2:], ["options", "meta"]) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description"]) + + lib.alert.update_recipient( + alert_id, + recipient_value, + prepare_options(sections["options"]), + prepare_options(sections["meta"]), + main_args.get("description", None) + ) + + +def recipient_remove(lib, argv, modifiers): + if len(argv) != 2: + raise CmdLineInputError() + + lib.alert.remove_recipient(argv[0], argv[1]) + + +def _nvset_to_str(nvset_obj): + output = [] + for nvpair_obj in nvset_obj: + output.append("{key}={value}".format( + key=nvpair_obj["name"], value=nvpair_obj["value"] + )) + return " ".join(output) + + +def __description_attributes_to_str(obj): + output = [] + if obj.get("description"): + output.append("Description: {desc}".format(desc=obj["description"])) + if obj.get("instance_attributes"): + output.append("Options: {attributes}".format( + attributes=_nvset_to_str(obj["instance_attributes"]) + )) + if obj.get("meta_attributes"): + output.append("Meta options: {attributes}".format( + attributes=_nvset_to_str(obj["meta_attributes"]) + )) + return output + + +def _alert_to_str(alert): + content = [] + content.extend(__description_attributes_to_str(alert)) + + recipients = [] + for recipient in alert.get("recipient_list", []): + recipients.extend( _recipient_to_str(recipient)) + + if recipients: + content.append("Recipients:") + content.extend(indent(recipients, 1)) + + return ["Alert: {alert_id} (path={path})".format( + alert_id=alert["id"], path=alert["path"] + )] + indent(content, 1) + + +def _recipient_to_str(recipient): + return ["Recipient: {value}".format(value=recipient["value"])] + indent( + __description_attributes_to_str(recipient), 1 + ) + + +def print_alert_config(lib, argv, modifiers): + if argv: + raise CmdLineInputError() + + print("Alerts:") + alert_list = lib.alert.get_all_alerts() + if alert_list: + for alert in alert_list: + print("\n".join(indent(_alert_to_str(alert), 1))) + else: + print(" No alerts defined") diff -Nru pcs-0.9.151/pcs/app.py 
pcs-0.9.153/pcs/app.py --- pcs-0.9.151/pcs/app.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/app.py 2016-07-01 13:31:39.000000000 +0000 @@ -19,6 +19,7 @@ node, pcsd, prop, + qdevice, quorum, resource, settings, @@ -26,6 +27,7 @@ stonith, usage, utils, + alert, ) from pcs.cli.common import completion @@ -95,7 +97,7 @@ "token=", "token_coefficient=", "consensus=", "join=", "miss_count_const=", "fail_recv_const=", "corosync_conf=", "cluster_conf=", - "remote", + "remote", "watchdog=", #in pcs status - do not display resorce status on inactive node "hide-inactive", ] @@ -123,10 +125,16 @@ argv = real_argv for o, a in pcs_options: if not o in utils.pcs_options: + if o == "--watchdog": + a = [a] utils.pcs_options[o] = a else: # If any options are a list then they've been entered twice which isn't valid - utils.err("%s can only be used once" % o) + if o != "--watchdog": + utils.err("%s can only be used once" % o) + else: + utils.pcs_options[o].append(a) + if o == "-h" or o == "--help": if len(argv) == 0: usage.main() @@ -181,6 +189,16 @@ argv, utils.get_modificators() ), + "qdevice": lambda argv: qdevice.qdevice_cmd( + utils.get_library_wrapper(), + argv, + utils.get_modificators() + ), + "alert": lambda args: alert.alert_cmd( + utils.get_library_wrapper(), + args, + utils.get_modificators() + ), } if command not in cmd_map: usage.main() diff -Nru pcs-0.9.151/pcs/cli/common/env.py pcs-0.9.153/pcs/cli/common/env.py --- pcs-0.9.151/pcs/cli/common/env.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/common/env.py 2016-07-01 13:31:39.000000000 +0000 @@ -8,6 +8,7 @@ class Env(object): def __init__(self): self.cib_data = None + self.cib_upgraded = False self.user = None self.groups = None self.corosync_conf_data = None diff -Nru pcs-0.9.151/pcs/cli/common/lib_wrapper.py pcs-0.9.153/pcs/cli/common/lib_wrapper.py --- pcs-0.9.151/pcs/cli/common/lib_wrapper.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/common/lib_wrapper.py 2016-07-01 13:31:39.000000000 +0000 @@ -15,7 +15,12 @@ from pcs.lib.commands.constraint import colocation as constraint_colocation from pcs.lib.commands.constraint import order as constraint_order from pcs.lib.commands.constraint import ticket as constraint_ticket -from pcs.lib.commands import quorum +from pcs.lib.commands import ( + quorum, + qdevice, + sbd, + alert, +) from pcs.cli.common.reports import ( LibraryReportProcessorToConsole as LibraryReportProcessorToConsole, ) @@ -38,6 +43,14 @@ cli_env.auth_tokens_getter, ) +def lib_env_to_cli_env(lib_env, cli_env): + if not lib_env.is_cib_live: + cli_env.cib_data = lib_env._get_cib_xml() + cli_env.cib_upgraded = lib_env.cib_upgraded + if not lib_env.is_corosync_conf_live: + cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() + return cli_env + def bind(cli_env, run_with_middleware, run_library_command): def run(cli_env, *args, **kwargs): lib_env = cli_env_to_lib_env(cli_env) @@ -46,10 +59,7 @@ #midlewares needs finish its work and they see only cli_env #so we need reflect some changes to cli_env - if not lib_env.is_cib_live: - cli_env.cib_data = lib_env.get_cib_xml() - if not lib_env.is_corosync_conf_live: - cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() + lib_env_to_cli_env(lib_env, cli_env) return lib_call_result return partial(run_with_middleware, run, cli_env) @@ -106,10 +116,61 @@ "add_device": quorum.add_device, "get_config": quorum.get_config, "remove_device": quorum.remove_device, + "set_expected_votes_live": quorum.set_expected_votes_live, "set_options": 
quorum.set_options, + "status": quorum.status_text, + "status_device": quorum.status_device_text, "update_device": quorum.update_device, } ) + if name == "qdevice": + return bind_all( + env, + middleware.build(), + { + "status": qdevice.qdevice_status_text, + "setup": qdevice.qdevice_setup, + "destroy": qdevice.qdevice_destroy, + "start": qdevice.qdevice_start, + "stop": qdevice.qdevice_stop, + "kill": qdevice.qdevice_kill, + "enable": qdevice.qdevice_enable, + "disable": qdevice.qdevice_disable, + # following commands are internal use only, called from pcsd + "client_net_setup": qdevice.client_net_setup, + "client_net_import_certificate": + qdevice.client_net_import_certificate, + "client_net_destroy": qdevice.client_net_destroy, + "sign_net_cert_request": + qdevice.qdevice_net_sign_certificate_request, + } + ) + if name == "sbd": + return bind_all( + env, + middleware.build(), + { + "enable_sbd": sbd.enable_sbd, + "disable_sbd": sbd.disable_sbd, + "get_cluster_sbd_status": sbd.get_cluster_sbd_status, + "get_cluster_sbd_config": sbd.get_cluster_sbd_config, + "get_local_sbd_config": sbd.get_local_sbd_config, + } + ) + if name == "alert": + return bind_all( + env, + middleware.build(middleware_factory.cib), + { + "create_alert": alert.create_alert, + "update_alert": alert.update_alert, + "remove_alert": alert.remove_alert, + "add_recipient": alert.add_recipient, + "update_recipient": alert.update_recipient, + "remove_recipient": alert.remove_recipient, + "get_all_alerts": alert.get_all_alerts, + } + ) raise Exception("No library part '{0}'".format(name)) diff -Nru pcs-0.9.151/pcs/cli/common/middleware.py pcs-0.9.153/pcs/cli/common/middleware.py --- pcs-0.9.151/pcs/cli/common/middleware.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/common/middleware.py 2016-07-01 13:31:39.000000000 +0000 @@ -34,7 +34,7 @@ result_of_next = next_in_line(env, *args, **kwargs) if use_local_cib: - write_cib(env.cib_data) + write_cib(env.cib_data, env.cib_upgraded) return result_of_next return apply diff -Nru pcs-0.9.151/pcs/cli/common/reports.py pcs-0.9.153/pcs/cli/common/reports.py --- pcs-0.9.151/pcs/cli/common/reports.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/common/reports.py 2016-07-01 13:31:39.000000000 +0000 @@ -49,7 +49,7 @@ return get_template(report_item).format(force=force_text) -def process_library_reports(report_item_list, is_forced=False): +def process_library_reports(report_item_list): """ report_item_list list of ReportItem """ @@ -63,13 +63,6 @@ print(report_item.message) continue - if report_item.forceable and is_forced: - # Let the user know what may be wrong even when --force is used, - # as it may be used for override early errors hiding later - # errors otherwise. 
- print("Warning: " + report_item.message) - continue - sys.stderr.write('Error: {0}\n'.format(_build_report_message( report_item, _prepare_force_text(report_item) diff -Nru pcs-0.9.151/pcs/cli/constraint_ticket/console_report.py pcs-0.9.153/pcs/cli/constraint_ticket/console_report.py --- pcs-0.9.151/pcs/cli/constraint_ticket/console_report.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/constraint_ticket/console_report.py 2016-07-01 13:31:39.000000000 +0000 @@ -13,14 +13,13 @@ bool with_id have to show id with options_dict """ options = constraint_info["options"] - return " ".join( - [options.get("rsc-role", ""), options.get("rsc", "")] - + - prepare_options( - dict( - (name, value) for name, value in options.items() - if name not in ["rsc-role", "rsc"] - ), - with_id - ) - ) + role = options.get("rsc-role", "") + role_prefix = "{0} ".format(role) if role else "" + + return role_prefix + " ".join([options.get("rsc", "")] + prepare_options( + dict( + (name, value) for name, value in options.items() + if name not in ["rsc-role", "rsc"] + ), + with_id + )) diff -Nru pcs-0.9.151/pcs/cli/constraint_ticket/test/test_console_report.py pcs-0.9.153/pcs/cli/constraint_ticket/test/test_console_report.py --- pcs-0.9.151/pcs/cli/constraint_ticket/test/test_console_report.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cli/constraint_ticket/test/test_console_report.py 2016-07-01 13:31:39.000000000 +0000 @@ -21,3 +21,15 @@ with_id=True ) ) + + def test_prepare_report_without_role(self): + self.assertEqual( + "resourceA (id:some_id)", + console_report.constraint_plain( + {"options": { + "rsc": "resourceA", + "id": "some_id" + }}, + with_id=True + ) + ) diff -Nru pcs-0.9.151/pcs/cluster.py pcs-0.9.153/pcs/cluster.py --- pcs-0.9.151/pcs/cluster.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/cluster.py 2016-07-01 13:31:39.000000000 +0000 @@ -36,12 +36,29 @@ ) from pcs.utils import parallel_for_nodes from pcs.common import report_codes +from pcs.cli.common.reports import process_library_reports from pcs.lib import ( pacemaker as lib_pacemaker, + sbd as lib_sbd, reports as lib_reports, ) -from pcs.lib.errors import ReportItemSeverity, LibraryError -from pcs.lib.corosync import config_parser as corosync_conf_utils +from pcs.lib.commands.quorum import _add_device_model_net +from pcs.lib.corosync import ( + config_parser as corosync_conf_utils, + qdevice_net, +) +from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade +from pcs.lib.errors import ( + LibraryError, + ReportItemSeverity, +) +from pcs.lib.external import ( + disable_service, + NodeCommunicationException, + node_communicator_exception_to_report_item, +) +from pcs.lib.node import NodeAddresses +from pcs.lib.tools import environment_file_to_dict def cluster_cmd(argv): if len(argv) == 0: @@ -277,7 +294,7 @@ ) if udpu_rrp and "rrp_mode" not in options["transport_options"]: options["transport_options"]["rrp_mode"] = "passive" - utils.process_library_reports(messages) + process_library_reports(messages) # prepare config file if is_rhel6: @@ -295,7 +312,7 @@ options["totem_options"], options["quorum_options"] ) - utils.process_library_reports(messages) + process_library_reports(messages) # setup on the local node if "--local" in utils.pcs_options: @@ -859,6 +876,7 @@ return print("Starting Cluster...") + service_list = [] if utils.is_rhel6(): # Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0 retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z 
"$CMAN_QUORUM_TIMEOUT" ]') @@ -871,14 +889,15 @@ print(output) utils.err("unable to start cman") else: - output, retval = utils.run(["service", "corosync","start"]) + service_list.append("corosync") + if utils.need_to_handle_qdevice_service(): + service_list.append("corosync-qdevice") + service_list.append("pacemaker") + for service in service_list: + output, retval = utils.run(["service", service, "start"]) if retval != 0: print(output) - utils.err("unable to start corosync") - output, retval = utils.run(["service", "pacemaker", "start"]) - if retval != 0: - print(output) - utils.err("unable to start pacemaker") + utils.err("unable to start {0}".format(service)) if wait: wait_for_nodes_started([], wait_timeout) @@ -1024,14 +1043,20 @@ enable_cluster_nodes(argv) return - utils.enableServices() + try: + utils.enableServices() + except LibraryError as e: + process_library_reports(e.args) def disable_cluster(argv): if len(argv) > 0: disable_cluster_nodes(argv) return - utils.disableServices() + try: + utils.disableServices() + except LibraryError as e: + process_library_reports(e.args) def enable_cluster_all(): enable_cluster_nodes(utils.getNodesFromCorosyncConf()) @@ -1121,13 +1146,18 @@ utils.err("unable to stop cman") else: print("Stopping Cluster (corosync)...") - output, retval = utils.run(["service", "corosync","stop"]) - if retval != 0: - print(output) - utils.err("unable to stop corosync") + service_list = [] + if utils.need_to_handle_qdevice_service(): + service_list.append("corosync-qdevice") + service_list.append("corosync") + for service in service_list: + output, retval = utils.run(["service", service, "stop"]) + if retval != 0: + print(output) + utils.err("unable to stop {0}".format(service)) def kill_cluster(argv): - daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"] + daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"] dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons) # if dummy_retval != 0: # print "Error: unable to execute killall -9" @@ -1141,6 +1171,9 @@ filename = None scope = None + timeout = None + if "--wait" in utils.pcs_options: + timeout = utils.validate_wait_get_timeout() for arg in argv: if "=" not in arg: filename = arg @@ -1176,8 +1209,20 @@ output, retval = utils.run(command) if retval != 0: utils.err("unable to push cib\n" + output) - else: - print("CIB updated") + print("CIB updated") + if "--wait" not in utils.pcs_options: + return + cmd = ["crm_resource", "--wait"] + if timeout: + cmd.extend(["--timeout", timeout]) + output, retval = utils.run(cmd) + if retval != 0: + msg = [] + if retval == settings.pacemaker_wait_timeout_status: + msg.append("waiting timeout") + if output: + msg.append("\n" + output) + utils.err("\n".join(msg).strip()) def cluster_edit(argv): if 'EDITOR' in os.environ: @@ -1315,6 +1360,50 @@ if not canAdd: utils.err("Unable to add '%s' to cluster: %s" % (node0, error)) + lib_env = utils.get_lib_env() + report_processor = lib_env.report_processor + node_communicator = lib_env.node_communicator() + node_addr = NodeAddresses(node0, node1) + try: + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: + watchdog = settings.sbd_watchdog_default + print("Warning: using default watchdog '{0}'".format( + watchdog + )) + else: + watchdog = utils.pcs_options["--watchdog"][0] + + report_processor.process(lib_reports.sbd_check_started()) + lib_sbd.check_sbd_on_node( + report_processor, 
node_communicator, node_addr, watchdog + ) + sbd_cfg = environment_file_to_dict( + lib_sbd.get_local_sbd_config() + ) + sbd_cfg["SBD_WATCHDOG_DEV"] = watchdog + report_processor.process( + lib_reports.sbd_config_distribution_started() + ) + lib_sbd.set_sbd_config_on_node( + report_processor, node_communicator, node_addr, sbd_cfg + ) + report_processor.process(lib_reports.sbd_enabling_started()) + lib_sbd.enable_sbd_service_on_node( + report_processor, node_communicator, node_addr + ) + else: + report_processor.process(lib_reports.sbd_disabling_started()) + lib_sbd.disable_sbd_service_on_node( + report_processor, node_communicator, node_addr + ) + except LibraryError as e: + process_library_reports(e.args) + except NodeCommunicationException as e: + process_library_reports( + [node_communicator_exception_to_report_item(e)] + ) + for my_node in utils.getNodesFromCorosyncConf(): retval, output = utils.addLocalNode(my_node, node0, node1) if retval != 0: @@ -1325,6 +1414,8 @@ else: print("%s: Corosync updated" % my_node) corosync_conf = output + # corosync.conf must be reloaded before the new node is started + output, retval = utils.reloadCorosync() if corosync_conf != None: # send local cluster pcsd configs to the new node # may be used for sending corosync config as well in future @@ -1348,6 +1439,25 @@ except: utils.err('Unable to communicate with pcsd') + # set qdevice-net certificates if needed + if not utils.is_rhel6(): + try: + conf_facade = corosync_conf_facade.from_string( + corosync_conf + ) + qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings() + if qdevice_model == "net": + _add_device_model_net( + lib_env, + qdevice_model_options["host"], + conf_facade.get_cluster_name(), + [node_addr], + skip_offline_nodes=False + ) + except LibraryError as e: + process_library_reports(e.args) + + print("Setting up corosync...") utils.setCorosyncConfig(node0, corosync_conf) if "--enable" in utils.pcs_options: retval, err = utils.enableCluster(node0) @@ -1363,7 +1473,6 @@ pcsd.pcsd_sync_certs([node0], exit_after_error=False) else: utils.err("Unable to update any nodes") - output, retval = utils.reloadCorosync() if utils.is_cman_with_udpu_transport(): print("Warning: Using udpu transport on a CMAN cluster, " + "cluster restart is required to apply node addition") @@ -1458,6 +1567,30 @@ else: success = utils.removeNodeFromClusterConf(node) +# The removed node might be present in CIB. If it is, pacemaker will show it as +# offline, no matter it's not in corosync / cman config any longer. We remove +# the node by running 'crm_node -R ' on the node where the remove command +# was ran. This only works if pacemaker is running. If it's not, we need +# to remove the node manually from the CIB on all nodes. + cib_node_remove = None + if utils.usefile: + cib_node_remove = utils.filename + elif not utils.is_service_running(utils.cmd_runner(), "pacemaker"): + cib_node_remove = os.path.join(settings.cib_dir, "cib.xml") + if cib_node_remove: + original_usefile, original_filename = utils.usefile, utils.filename + utils.usefile = True + utils.filename = cib_node_remove + dummy_output, dummy_retval = utils.run([ + "cibadmin", + "--delete-all", + "--force", + "--xpath=/cib/configuration/nodes/node[@uname='{0}']".format( + node + ), + ]) + utils.usefile, utils.filename = original_usefile, original_filename + if success: print("%s: successfully removed!" 
% node) else: @@ -1612,10 +1745,23 @@ else: print("Shutting down pacemaker/corosync services...") os.system("service pacemaker stop") + # returns error if qdevice is not running, it is safe to ignore it + # since we want it not to be running + os.system("service corosync-qdevice stop") os.system("service corosync stop") print("Killing any remaining services...") - os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") - utils.disableServices() + os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") + try: + utils.disableServices() + except: + # previously errors were suppressed in here, let's keep it that way + # for now + pass + try: + disable_service(utils.cmd_runner(), "sbd") + except: + # it's not a big deal if sbd disable fails + pass print("Removing all cluster configuration files...") if utils.is_rhel6(): @@ -1626,6 +1772,12 @@ "pe*.bz2","cib.*"] for name in state_files: os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;") + try: + qdevice_net.client_destroy() + except: + # errors from deleting other files are suppressed as well + # we do not want to fail if qdevice was not set up + pass def cluster_verify(argv): nofilename = True @@ -1740,7 +1892,7 @@ def cluster_quorum_unblock(argv): if len(argv) > 0: - usage.cluster(["quorum", "unblock"]) + usage.quorum(["unblock"]) sys.exit(1) if utils.is_rhel6(): @@ -1761,8 +1913,20 @@ ) if not unjoined_nodes: utils.err("no unjoined nodes found") + if "--force" not in utils.pcs_options: + answer = utils.get_terminal_input( + ( + "WARNING: If node(s) {nodes} are not powered off or they do" + + " have access to shared resources, data corruption and/or" + + " cluster failure may occur. Are you sure you want to" + + " continue? 
[y/N] " + ).format(nodes=", ".join(unjoined_nodes)) + ) + if answer.lower() not in ["y", "yes"]: + print("Canceled") + return for node in unjoined_nodes: - stonith.stonith_confirm([node]) + stonith.stonith_confirm([node], skip_question=True) output, retval = utils.run( ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"] @@ -1777,5 +1941,5 @@ "false" if startup_fencing.lower() != "false" else "true" ) utils.set_cib_property("startup-fencing", startup_fencing) - print("Waiting for nodes cancelled") + print("Waiting for nodes canceled") diff -Nru pcs-0.9.151/pcs/common/report_codes.py pcs-0.9.153/pcs/common/report_codes.py --- pcs-0.9.151/pcs/common/report_codes.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/common/report_codes.py 2016-07-01 13:31:39.000000000 +0000 @@ -12,14 +12,26 @@ FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD" FORCE_OPTIONS = "OPTIONS" FORCE_QDEVICE_MODEL = "QDEVICE_MODEL" +FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT" +FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT" +FORCE_METADATA_ISSUE = "METADATA_ISSUE" SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES" +AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR" +AGENT_NOT_FOUND = "AGENT_NOT_FOUND" BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT' +CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND" +CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS" +CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND" CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION" CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT" CIB_LOAD_ERROR = "CIB_LOAD_ERROR" CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING" CIB_PUSH_ERROR = "CIB_PUSH_ERROR" +CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED" +CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION" +CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL" +CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES" CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS' CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED' CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND" @@ -33,6 +45,9 @@ COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED" COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" +COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" +COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" +COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR" COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" CRM_MON_ERROR = "CRM_MON_ERROR" DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" @@ -45,25 +60,43 @@ INVALID_OPTION = "INVALID_OPTION" INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE" INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME' +INVALID_RESPONSE_FORMAT = "INVALID_RESPONSE_FORMAT" INVALID_SCORE = "INVALID_SCORE" INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE" MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS" -NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR", -NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED", -NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED", -NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT", -NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND", +NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = 
"NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL" +NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR" +NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED" +NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED" +NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT" +NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND" NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED" NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED" NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED" NODE_NOT_FOUND = "NODE_NOT_FOUND" NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH' +OMITTING_NODE = "OMITTING_NODE" PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" -PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE", -PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF", -PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE", +PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE" +PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF" +PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE" QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED" +QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED" +QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE" +QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED" +QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED" +QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE" +QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR" +QDEVICE_CERTIFICATE_SIGN_ERROR = "QDEVICE_CERTIFICATE_SIGN_ERROR" +QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR" +QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS" +QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR" +QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR" +QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS" QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" +QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" +QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" +QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING" RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR" RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING' @@ -76,7 +109,34 @@ RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR" RUN_EXTERNAL_PROCESS_FINISHED = "RUN_EXTERNAL_PROCESS_FINISHED" RUN_EXTERNAL_PROCESS_STARTED = "RUN_EXTERNAL_PROCESS_STARTED" +SBD_CHECK_STARTED = "SBD_CHECK_STARTED" +SBD_CHECK_SUCCESS = "SBD_CHECK_SUCCESS" +SBD_CONFIG_DISTRIBUTION_STARTED = "SBD_CONFIG_DISTRIBUTION_STARTED" +SBD_CONFIG_ACCEPTED_BY_NODE = "SBD_CONFIG_ACCEPTED_BY_NODE" +SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED" +SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED" +SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED" +SBD_NOT_ENABLED = "SBD_NOT_ENABLED" +SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR" +SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED" +SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS" +SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR" +SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED" +SERVICE_ENABLE_SKIPPED = 
"SERVICE_ENABLE_SKIPPED" +SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS" +SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR" +SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS" +SERVICE_START_ERROR = "SERVICE_START_ERROR" +SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED" +SERVICE_START_STARTED = "SERVICE_START_STARTED" +SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS" +SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR" +SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED" +SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS" UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA' UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG" +UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG" +UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS" UNKNOWN_COMMAND = 'UNKNOWN_COMMAND' -UNSUPPORTED_RESOURCE_AGENT = 'UNSUPPORTED_RESOURCE_AGENT' +UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT' +WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND" diff -Nru pcs-0.9.151/pcs/common/tools.py pcs-0.9.153/pcs/common/tools.py --- pcs-0.9.151/pcs/common/tools.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/common/tools.py 2016-07-01 13:31:39.000000000 +0000 @@ -5,6 +5,8 @@ unicode_literals, ) +import threading + def simple_cache(func): cache = { @@ -19,3 +21,15 @@ return cache["value"] return wrapper + + +def run_parallel(worker, data_list): + thread_list = [] + for args, kwargs in data_list: + thread = threading.Thread(target=worker, args=args, kwargs=kwargs) + thread.daemon = True + thread_list.append(thread) + thread.start() + + for thread in thread_list: + thread.join() diff -Nru pcs-0.9.151/pcs/config.py pcs-0.9.153/pcs/config.py --- pcs-0.9.151/pcs/config.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/config.py 2016-07-01 13:31:39.000000000 +0000 @@ -38,6 +38,7 @@ stonith, usage, utils, + alert, ) from pcs.lib.errors import LibraryError from pcs.lib.commands import quorum as lib_quorum @@ -123,6 +124,9 @@ ticket_command.show(lib, [], modificators) print() + alert.print_alert_config(lib, [], modificators) + + print() del utils.pcs_options["--all"] print("Resources Defaults:") resource.show_defaults("rsc_defaults", indent=" ") diff -Nru pcs-0.9.151/pcs/constraint.py pcs-0.9.153/pcs/constraint.py --- pcs-0.9.151/pcs/constraint.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/constraint.py 2016-07-01 13:31:39.000000000 +0000 @@ -92,11 +92,12 @@ "add": ticket_command.add, "show": ticket_command.show, } - if argv[0] not in command_map: + sub_command = argv[0] if argv else "show" + if sub_command not in command_map: raise CmdLineInputError() - usage_name = "ticket "+argv[0] + usage_name = "ticket "+sub_command - command_map[argv[0]](lib, argv[1:], modificators) + command_map[sub_command](lib, argv[1:], modificators) except LibraryError as e: utils.process_library_reports(e.args) except CmdLineInputError as e: diff -Nru pcs-0.9.151/pcs/lib/cib/alert.py pcs-0.9.153/pcs/lib/cib/alert.py --- pcs-0.9.151/pcs/lib/cib/alert.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/alert.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,281 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +from lxml import etree + +from pcs.lib import reports +from pcs.lib.errors import LibraryError +from pcs.lib.cib.nvpair import update_nvset, get_nvset +from pcs.lib.cib.tools import ( + check_new_id_applicable, + get_sub_element, + find_unique_id, + get_alerts, +) + + +def update_instance_attributes(tree, element, attribute_dict): + """ + Updates instance attributes of 
element. Returns updated instance + attributes element. + + tree -- cib etree node + element -- parent element of instance attributes + attribute_dict -- dictionary of nvpairs + """ + return update_nvset("instance_attributes", tree, element, attribute_dict) + + +def update_meta_attributes(tree, element, attribute_dict): + """ + Updates meta attributes of element. Returns updated meta attributes element. + + tree -- cib etree node + element -- parent element of meta attributes + attribute_dict -- dictionary of nvpairs + """ + return update_nvset("meta_attributes", tree, element, attribute_dict) + + +def _update_optional_attribute(element, attribute, value): + """ + Update optional attribute of element. Remove existing element if value + is empty. + + element -- parent element of specified attribute + attribute -- attribute to be updated + value -- new value + """ + if value is None: + return + if value: + element.set(attribute, value) + elif attribute in element.attrib: + del element.attrib[attribute] + + +def get_alert_by_id(tree, alert_id): + """ + Returns alert element with specified id. + Raises AlertNotFound if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert + """ + alert = get_alerts(tree).find("./alert[@id='{0}']".format(alert_id)) + if alert is None: + raise LibraryError(reports.cib_alert_not_found(alert_id)) + return alert + + +def get_recipient(alert, recipient_value): + """ + Returns recipient element with value recipient_value which belong to + specified alert. + Raises RecipientNotFound if recipient doesn't exist. + + alert -- parent element of required recipient + recipient_value -- value of recipient + """ + recipient = alert.find( + "./recipient[@value='{0}']".format(recipient_value) + ) + if recipient is None: + raise LibraryError(reports.cib_alert_recipient_not_found( + alert.get("id"), recipient_value + )) + return recipient + + +def create_alert(tree, alert_id, path, description=""): + """ + Create new alert element. Returns newly created element. + Raises LibraryError if element with specified id already exists. + + tree -- cib etree node + alert_id -- id of new alert, it will be generated if it is None + path -- path to script + description -- description + """ + if alert_id: + check_new_id_applicable(tree, "alert-id", alert_id) + else: + alert_id = find_unique_id(tree, "alert") + + alert = etree.SubElement(get_alerts(tree), "alert", id=alert_id, path=path) + if description: + alert.set("description", description) + + return alert + + +def update_alert(tree, alert_id, path, description=None): + """ + Update existing alert. Return updated alert element. + Raises AlertNotFound if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert to be updated + path -- new value of path, stay unchanged if None + description -- new value of description, stay unchanged if None, remove + if empty + """ + alert = get_alert_by_id(tree, alert_id) + if path: + alert.set("path", path) + _update_optional_attribute(alert, "description", description) + return alert + + +def remove_alert(tree, alert_id): + """ + Remove alert with specified id. + Raises AlertNotFound if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert which should be removed + """ + alert = get_alert_by_id(tree, alert_id) + alert.getparent().remove(alert) + + +def add_recipient( + tree, + alert_id, + recipient_value, + description="" +): + """ + Add recipient to alert with specified id. 
Returns added recipient element. + Raises AlertNotFound if alert with specified id doesn't exist. + Raises LibraryError if recipient already exists. + + tree -- cib etree node + alert_id -- id of alert which should be parent of new recipient + recipient_value -- value of recipient + description -- description of recipient + """ + alert = get_alert_by_id(tree, alert_id) + + recipient = alert.find( + "./recipient[@value='{0}']".format(recipient_value) + ) + if recipient is not None: + raise LibraryError(reports.cib_alert_recipient_already_exists( + alert_id, recipient_value + )) + + recipient = etree.SubElement( + alert, + "recipient", + id=find_unique_id(tree, "{0}-recipient".format(alert_id)), + value=recipient_value + ) + + if description: + recipient.set("description", description) + + return recipient + + +def update_recipient(tree, alert_id, recipient_value, description): + """ + Update specified recipient. Returns updated recipient element. + Raises AlertNotFound if alert with specified id doesn't exist. + Raises RecipientNotFound if recipient doesn't exist. + + tree -- cib etree node + alert_id -- id of alert, parent element of recipient + recipient_value -- recipient value + description -- description, if empty it will be removed, stay unchanged + if None + """ + recipient = get_recipient( + get_alert_by_id(tree, alert_id), recipient_value + ) + _update_optional_attribute(recipient, "description", description) + return recipient + + +def remove_recipient(tree, alert_id, recipient_value): + """ + Remove specified recipient. + Raises AlertNotFound if alert with specified id doesn't exist. + Raises RecipientNotFound if recipient doesn't exist. + + tree -- cib etree node + alert_id -- id of alert, parent element of recipient + recipient_value -- recipient value + """ + recipient = get_recipient( + get_alert_by_id(tree, alert_id), recipient_value + ) + recipient.getparent().remove(recipient) + + +def get_all_recipients(alert): + """ + Returns list of all recipient of specified alert. Format: + [ + { + "id": , + "value": , + "description": , + "instance_attributes": , + "meta_attributes": + } + ] + + alert -- parent element of recipients to return + """ + recipient_list = [] + for recipient in alert.findall("./recipient"): + recipient_list.append({ + "id": recipient.get("id"), + "value": recipient.get("value"), + "description": recipient.get("description", ""), + "instance_attributes": get_nvset( + get_sub_element(recipient, "instance_attributes") + ), + "meta_attributes": get_nvset( + get_sub_element(recipient, "meta_attributes") + ) + }) + return recipient_list + + +def get_all_alerts(tree): + """ + Returns list of all alerts specified in tree. 
Format: + [ + { + "id": , + "path": , + "description": , + "instance_attributes": , + "meta_attributes": , + "recipients_list": + } + ] + + tree -- cib etree node + """ + alert_list = [] + for alert in get_alerts(tree).findall("./alert"): + alert_list.append({ + "id": alert.get("id"), + "path": alert.get("path"), + "description": alert.get("description", ""), + "instance_attributes": get_nvset( + get_sub_element(alert, "instance_attributes") + ), + "meta_attributes": get_nvset( + get_sub_element(alert, "meta_attributes") + ), + "recipient_list": get_all_recipients(alert) + }) + return alert_list diff -Nru pcs-0.9.151/pcs/lib/cib/constraint/ticket.py pcs-0.9.153/pcs/lib/cib/constraint/ticket.py --- pcs-0.9.151/pcs/lib/cib/constraint/ticket.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/constraint/ticket.py 2016-07-01 13:31:39.000000000 +0000 @@ -52,7 +52,7 @@ validate_id=partial(tools.check_new_id_applicable, cib, DESCRIPTION), ) report = _validate_options_common(options) - if "ticket" not in options: + if "ticket" not in options or not options["ticket"].strip(): report.append(reports.required_option_is_missing('ticket')) if report: raise LibraryError(*report) diff -Nru pcs-0.9.151/pcs/lib/cib/nvpair.py pcs-0.9.153/pcs/lib/cib/nvpair.py --- pcs-0.9.151/pcs/lib/cib/nvpair.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/nvpair.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +from lxml import etree + +from pcs.lib.cib.tools import ( + get_sub_element, + find_unique_id, +) + + +def update_nvpair(tree, element, name, value): + """ + Update nvpair, create new if it doesn't yet exist or remove existing + nvpair if value is empty. Returns created/updated/removed nvpair element. + + tree -- cib etree node + element -- element in which nvpair should be added/updated/removed + name -- name of nvpair + value -- value of nvpair + """ + nvpair = element.find("./nvpair[@name='{0}']".format(name)) + if nvpair is None: + if not value: + return None + nvpair_id = find_unique_id( + tree, "{0}-{1}".format(element.get("id"), name) + ) + nvpair = etree.SubElement( + element, "nvpair", id=nvpair_id, name=name, value=value + ) + else: + if value: + nvpair.set("value", value) + else: + # remove nvpair if value is empty + element.remove(nvpair) + return nvpair + + +def update_nvset(tag_name, tree, element, attribute_dict): + """ + This method updates nvset specified by tag_name. If specified nvset + doesn't exist it will be created. Returns updated nvset element or None if + attribute_dict is empty. + + tag_name -- tag name of nvset element + tree -- cib etree node + element -- parent element of nvset + attribute_dict -- dictionary of nvpairs + """ + if not attribute_dict: + return None + + attributes = get_sub_element(element, tag_name, find_unique_id( + tree, "{0}-{1}".format(element.get("id"), tag_name) + ), 0) + + for name, value in sorted(attribute_dict.items()): + update_nvpair(tree, attributes, name, value) + + return attributes + + +def get_nvset(nvset): + """ + Returns nvset element as list of nvpairs with format: + [ + { + "id": , + "name": , + "value": + }, + ... 
+ ] + + nvset -- nvset element + """ + nvpair_list = [] + for nvpair in nvset.findall("./nvpair"): + nvpair_list.append({ + "id": nvpair.get("id"), + "name": nvpair.get("name"), + "value": nvpair.get("value", "") + }) + return nvpair_list diff -Nru pcs-0.9.151/pcs/lib/cib/resource.py pcs-0.9.153/pcs/lib/cib/resource.py --- pcs-0.9.151/pcs/lib/cib/resource.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/resource.py 2016-07-01 13:31:39.000000000 +0000 @@ -9,7 +9,7 @@ TAGS_ALL = TAGS_CLONE + ("primitive", "group") def find_by_id(tree, id): - element = tree.find('.//*[@id="{0}"]'.format(id)) - if element is None or element.tag not in TAGS_ALL: - return None - return element + for element in tree.findall('.//*[@id="{0}"]'.format(id)): + if element is not None and element.tag in TAGS_ALL: + return element + return None diff -Nru pcs-0.9.151/pcs/lib/cib/test/test_alert.py pcs-0.9.153/pcs/lib/cib/test/test_alert.py --- pcs-0.9.151/pcs/lib/cib/test/test_alert.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/test/test_alert.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,931 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +from unittest import TestCase + +from lxml import etree + +from pcs.common import report_codes +from pcs.lib.cib import alert +from pcs.lib.errors import ReportItemSeverity as severities +from pcs.test.tools.assertions import( + assert_raise_library_error, + assert_xml_equal, +) +from pcs.test.tools.pcs_mock import mock + + +@mock.patch("pcs.lib.cib.alert.update_nvset") +class UpdateInstanceAttributesTest(TestCase): + def test_success(self, mock_update_nvset): + ret_val = etree.Element("nvset") + tree = etree.Element("tree") + element = etree.Element("element") + attributes = {"a": 1} + mock_update_nvset.return_value = ret_val + self.assertEqual( + alert.update_instance_attributes(tree, element, attributes), + ret_val + ) + mock_update_nvset.assert_called_once_with( + "instance_attributes", tree, element, attributes + ) + + +@mock.patch("pcs.lib.cib.alert.update_nvset") +class UpdateMetaAttributesTest(TestCase): + def test_success(self, mock_update_nvset): + ret_val = etree.Element("nvset") + tree = etree.Element("tree") + element = etree.Element("element") + attributes = {"a": 1} + mock_update_nvset.return_value = ret_val + self.assertEqual( + alert.update_meta_attributes(tree, element, attributes), + ret_val + ) + mock_update_nvset.assert_called_once_with( + "meta_attributes", tree, element, attributes + ) + + +class UpdateOptionalAttributeTest(TestCase): + def test_add(self): + element = etree.Element("element") + alert._update_optional_attribute(element, "attr", "value1") + self.assertEqual(element.get("attr"), "value1") + + def test_update(self): + element = etree.Element("element", attr="value") + alert._update_optional_attribute(element, "attr", "value1") + self.assertEqual(element.get("attr"), "value1") + + def test_remove(self): + element = etree.Element("element", attr="value") + alert._update_optional_attribute(element, "attr", "") + self.assertTrue(element.get("attr") is None) + + +class GetAlertByIdTest(TestCase): + def test_found(self): + xml = """ + + + + + + + + + """ + assert_xml_equal( + '', + etree.tostring( + alert.get_alert_by_id(etree.XML(xml), "alert-2") + ).decode() + ) + + def test_different_place(self): + xml = """ + + + + + + + + + """ + assert_raise_library_error( + lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"), + ( + severities.ERROR, + 
report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert-2"} + ) + ) + + def test_not_exist(self): + xml = """ + + + + + + + + """ + assert_raise_library_error( + lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert-2"} + ) + ) + + +class GetRecipientTest(TestCase): + def setUp(self): + self.xml = etree.XML( + """ + + + + + + + + + """ + ) + + def test_exist(self): + assert_xml_equal( + '', + etree.tostring(alert.get_recipient(self.xml, "value2")).decode() + ) + + def test_different_place(self): + assert_raise_library_error( + lambda: alert.get_recipient(self.xml, "value4"), + ( + severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "alert": "alert-1", + "recipient": "value4" + } + ) + ) + + def test_not_recipient(self): + assert_raise_library_error( + lambda: alert.get_recipient(self.xml, "value3"), + ( + severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "alert": "alert-1", + "recipient": "value3" + } + ) + ) + + +class CreateAlertTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + """ + ) + + def test_no_alerts(self): + tree = etree.XML( + """ + + + + """ + ) + assert_xml_equal( + '', + etree.tostring( + alert.create_alert(tree, "my-alert", "/test/path") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + """, + etree.tostring(tree).decode() + ) + + def test_alerts_exists(self): + assert_xml_equal( + '', + etree.tostring( + alert.create_alert(self.tree, "my-alert", "/test/path") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_alerts_exists_with_description(self): + assert_xml_equal( + '', + etree.tostring(alert.create_alert( + self.tree, "my-alert", "/test/path", "nothing" + )).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_invalid_id(self): + assert_raise_library_error( + lambda: alert.create_alert(self.tree, "1alert", "/path"), + ( + severities.ERROR, + report_codes.INVALID_ID, + { + "id": "1alert", + "id_description": "alert-id", + "invalid_character": "1", + "reason": "invalid first character" + } + ) + ) + + def test_id_exists(self): + assert_raise_library_error( + lambda: alert.create_alert(self.tree, "alert", "/path"), + ( + severities.ERROR, + report_codes.ID_ALREADY_EXISTS, + {"id": "alert"} + ) + ) + + def test_no_id(self): + assert_xml_equal( + '', + etree.tostring( + alert.create_alert(self.tree, None, "/test/path") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + +class UpdateAlertTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + + """ + ) + + def test_update_path(self): + assert_xml_equal( + '', + etree.tostring( + alert.update_alert(self.tree, "alert", "/test/path") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_remove_path(self): + assert_xml_equal( + '', + etree.tostring(alert.update_alert(self.tree, "alert", "")).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_update_description(self): + assert_xml_equal( + '', + etree.tostring( + alert.update_alert(self.tree, "alert", None, "desc") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_remove_description(self): + assert_xml_equal( + 
'', + etree.tostring( + alert.update_alert(self.tree, "alert1", None, "") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_id_not_exists(self): + assert_raise_library_error( + lambda: alert.update_alert(self.tree, "alert0", "/test"), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert0"} + ) + ) + + +class RemoveAlertTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + + """ + ) + + def test_success(self): + alert.remove_alert(self.tree, "alert") + assert_xml_equal( + """ + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_not_existing_id(self): + assert_raise_library_error( + lambda: alert.remove_alert(self.tree, "not-existing-id"), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "not-existing-id"} + ) + ) + + +class AddRecipientTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + + + """ + ) + + def test_success(self): + assert_xml_equal( + '', + etree.tostring( + alert.add_recipient(self.tree, "alert", "value1") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_recipient_exist(self): + assert_raise_library_error( + lambda: alert.add_recipient(self.tree, "alert", "test_val"), + ( + severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, + { + "recipient": "test_val", + "alert": "alert" + } + ) + ) + + def test_alert_not_exist(self): + assert_raise_library_error( + lambda: alert.add_recipient(self.tree, "alert1", "test_val"), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert1"} + ) + ) + + def test_with_description(self): + assert_xml_equal( + """ + + """, + etree.tostring(alert.add_recipient( + self.tree, "alert", "value1", "desc" + )).decode() + ) + assert_xml_equal( + """ + + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + +class UpdateRecipientTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + + + + """ + ) + + def test_add_description(self): + assert_xml_equal( + """ + + """, + etree.tostring(alert.update_recipient( + self.tree, "alert", "test_val", "description" + )).decode() + ) + assert_xml_equal( + """ + + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_update_description(self): + assert_xml_equal( + """ + + """, + etree.tostring(alert.update_recipient( + self.tree, "alert", "value1", "description" + )).decode() + ) + assert_xml_equal( + """ + + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_remove_description(self): + assert_xml_equal( + """ + + """, + etree.tostring( + alert.update_recipient(self.tree, "alert", "value1", "") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_alert_not_exists(self): + assert_raise_library_error( + lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert1"} + ) + ) + + def test_recipient_not_exists(self): + assert_raise_library_error( + lambda: alert.update_recipient(self.tree, "alert", "unknown", ""), + ( + severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "alert": "alert", + "recipient": "unknown" + } + ) + ) + + +class RemoveRecipientTest(TestCase): + def setUp(self): + self.tree = etree.XML( + """ + + + + + + + + + + + """ + ) + + def 
test_success(self): + alert.remove_recipient(self.tree, "alert", "val") + assert_xml_equal( + """ + + + + + + + + + + """, + etree.tostring(self.tree).decode() + ) + + def test_alert_not_exists(self): + assert_raise_library_error( + lambda: alert.remove_recipient(self.tree, "alert1", "test_val"), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "alert1"} + ) + ) + + def test_recipient_not_exists(self): + assert_raise_library_error( + lambda: alert.remove_recipient(self.tree, "alert", "unknown"), + ( + severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "alert": "alert", + "recipient": "unknown" + } + ) + ) + + +class GetAllRecipientsTest(TestCase): + def test_success(self): + alert_obj = etree.XML( + """ + + + + + + + + + + + + + """ + ) + self.assertEqual( + [ + { + "id": "alert-recipient", + "value": "test_val", + "description": "", + "instance_attributes": [ + { + "id": "nvset-name1-value1", + "name": "name1", + "value": "value1" + }, + { + "id": "nvset-name2-value2", + "name": "name2", + "value": "value2" + } + ], + "meta_attributes": [ + { + "id": "nvset-name3", + "name": "name3", + "value": "" + } + ] + }, + { + "id": "alert-recipient-1", + "value": "value1", + "description": "desc", + "instance_attributes": [], + "meta_attributes": [] + } + ], + alert.get_all_recipients(alert_obj) + ) + + +class GetAllAlertsTest(TestCase): + def test_success(self): + alerts = etree.XML( + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + """ + ) + self.assertEqual( + [ + { + "id": "alert", + "path": "/path", + "description": "", + "instance_attributes": [], + "meta_attributes": [], + "recipient_list": [ + { + "id": "alert-recipient", + "value": "test_val", + "description": "", + "instance_attributes": [ + { + "id": "instance_attributes-name1-value1", + "name": "name1", + "value": "value1" + }, + { + "id": "instance_attributes-name2-value2", + "name": "name2", + "value": "value2" + } + ], + "meta_attributes": [ + { + "id": "meta_attributes-name3", + "name": "name3", + "value": "" + } + ] + }, + { + "id": "alert-recipient-1", + "value": "value1", + "description": "desc", + "instance_attributes": [], + "meta_attributes": [] + } + ] + }, + { + "id": "alert1", + "path": "/test/path", + "description": "desc", + "instance_attributes": [ + { + "id": "alert1-name1-value1", + "name": "name1", + "value": "value1" + }, + { + "id": "alert1-name2-value2", + "name": "name2", + "value": "value2" + } + ], + "meta_attributes": [ + { + "id": "alert1-name3", + "name": "name3", + "value": "" + } + ], + "recipient_list": [] + } + ], + alert.get_all_alerts(alerts) + ) diff -Nru pcs-0.9.151/pcs/lib/cib/test/test_constraint_ticket.py pcs-0.9.153/pcs/lib/cib/test/test_constraint_ticket.py --- pcs-0.9.151/pcs/lib/cib/test/test_constraint_ticket.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/test/test_constraint_ticket.py 2016-07-01 13:31:39.000000000 +0000 @@ -222,6 +222,19 @@ ) ) + def test_refuse_empty_ticket(self, _): + assert_raise_library_error( + lambda: self.prepare({ + "loss-policy": "stop", + "id": "id", + "ticket": " " + }), + ( + severities.ERROR, + report_codes.REQUIRED_OPTION_IS_MISSING, + {"option_name": "ticket"} + ) + ) class Element(object): def __init__(self, attrib): diff -Nru pcs-0.9.151/pcs/lib/cib/test/test_nvpair.py pcs-0.9.153/pcs/lib/cib/test/test_nvpair.py --- pcs-0.9.151/pcs/lib/cib/test/test_nvpair.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/test/test_nvpair.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 
+1,206 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +from unittest import TestCase + +from lxml import etree + +from pcs.lib.cib import nvpair +from pcs.test.tools.assertions import assert_xml_equal + + +class UpdateNvpairTest(TestCase): + def setUp(self): + self.nvset = etree.Element("nvset", id="nvset") + etree.SubElement( + self.nvset, "nvpair", id="nvset-attr", name="attr", value="1" + ) + etree.SubElement( + self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2" + ) + etree.SubElement( + self.nvset, "notnvpair", id="nvset-test", name="test", value="0" + ) + + def test_update(self): + assert_xml_equal( + "", + etree.tostring( + nvpair.update_nvpair(self.nvset, self.nvset, "attr", "10") + ).decode() + ) + assert_xml_equal( + """ + + + + + + """, + etree.tostring(self.nvset).decode() + ) + + def test_add(self): + assert_xml_equal( + "", + etree.tostring( + nvpair.update_nvpair(self.nvset, self.nvset, "test", "0") + ).decode() + ) + assert_xml_equal( + """ + + + + + + + """, + etree.tostring(self.nvset).decode() + ) + + def test_remove(self): + assert_xml_equal( + "", + etree.tostring( + nvpair.update_nvpair(self.nvset, self.nvset, "attr2", "") + ).decode() + ) + assert_xml_equal( + """ + + + + + """, + etree.tostring(self.nvset).decode() + ) + + def test_remove_not_existing(self): + self.assertTrue( + nvpair.update_nvpair(self.nvset, self.nvset, "attr3", "") is None + ) + assert_xml_equal( + """ + + + + + + """, + etree.tostring(self.nvset).decode() + ) + + +class UpdateNvsetTest(TestCase): + def setUp(self): + self.root = etree.Element("root", id="root") + self.nvset = etree.SubElement(self.root, "nvset", id="nvset") + etree.SubElement( + self.nvset, "nvpair", id="nvset-attr", name="attr", value="1" + ) + etree.SubElement( + self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2" + ) + etree.SubElement( + self.nvset, "notnvpair", id="nvset-test", name="test", value="0" + ) + + def test_None(self): + self.assertTrue( + nvpair.update_nvset("nvset", self.root, self.root, None) is None + ) + + def test_empty(self): + self.assertTrue( + nvpair.update_nvset("nvset", self.root, self.root, {}) is None + ) + + def test_existing(self): + self.assertEqual( + self.nvset, + nvpair.update_nvset("nvset", self.root, self.root, { + "attr": "10", + "new_one": "20", + "test": "0", + "attr2": "" + }) + ) + assert_xml_equal( + """ + + + + + + + """, + etree.tostring(self.nvset).decode() + ) + + def test_new(self): + root = etree.Element("root", id="root") + assert_xml_equal( + """ + + + + + + """, + etree.tostring(nvpair.update_nvset("nvset", root, root, { + "attr": "10", + "new_one": "20", + "test": "0", + "attr2": "" + })).decode() + ) + assert_xml_equal( + """ + + + + + + + + """, + etree.tostring(root).decode() + ) + + +class GetNvsetTest(TestCase): + def test_success(self): + nvset = etree.XML( + """ + + + + + + """ + ) + self.assertEqual( + [ + { + "id": "nvset-name1", + "name": "name1", + "value": "value1" + }, + { + "id": "nvset-name2", + "name": "name2", + "value": "value2" + }, + { + "id": "nvset-name3", + "name": "name3", + "value": "" + } + ], + nvpair.get_nvset(nvset) + ) diff -Nru pcs-0.9.151/pcs/lib/cib/test/test_resource.py pcs-0.9.153/pcs/lib/cib/test/test_resource.py --- pcs-0.9.151/pcs/lib/cib/test/test_resource.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/test/test_resource.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,21 @@ +from __future__ import ( + absolute_import, + division, + 
print_function, + unicode_literals, +) + +from unittest import TestCase +from lxml import etree +from pcs.lib.cib.resource import find_by_id + +class FindByIdTest(TestCase): + def test_find_correct_tag(self): + tree = etree.XML(""" + + + + + """) + element = find_by_id(tree, "A") + self.assertEqual(element.tag, "primitive") diff -Nru pcs-0.9.151/pcs/lib/cib/tools.py pcs-0.9.153/pcs/lib/cib/tools.py --- pcs-0.9.151/pcs/lib/cib/tools.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/cib/tools.py 2016-07-01 13:31:39.000000000 +0000 @@ -5,8 +5,12 @@ unicode_literals, ) +import os +import re +import tempfile from lxml import etree +from pcs import settings from pcs.lib import reports from pcs.lib.errors import LibraryError from pcs.lib.pacemaker_values import validate_id @@ -71,6 +75,15 @@ acls = etree.SubElement(get_configuration(tree), "acls") return acls + +def get_alerts(tree): + """ + Return 'alerts' element from tree, create a new one if missing + tree -- cib etree node + """ + return get_sub_element(get_configuration(tree), "alerts") + + def get_constraints(tree): """ Return 'constraint' element from tree @@ -87,3 +100,117 @@ def export_attributes(element): return dict((key, value) for key, value in element.attrib.items()) + + +def get_sub_element(element, sub_element_tag, new_id=None, new_index=None): + """ + Returns sub-element sub_element_tag of element. It will create new + element if such doesn't exist yet. Id of new element will be new_if if + it's not None. new_index specify where will be new element added, if None + it will be appended. + + element -- parent element + sub_element_tag -- tag of wanted element + new_id -- id of new element + new_index -- index for new element + """ + sub_element = element.find("./{0}".format(sub_element_tag)) + if sub_element is None: + sub_element = etree.Element(sub_element_tag) + if new_id: + sub_element.set("id", new_id) + if new_index is None: + element.append(sub_element) + else: + element.insert(new_index, sub_element) + return sub_element + + +def get_pacemaker_version_by_which_cib_was_validated(cib): + """ + Return version of pacemaker which validated specified cib as tree. + Version is returned as tuple of integers: (, , ). + Raises LibraryError on any failure. + + cib -- cib etree + """ + version = cib.get("validate-with") + if version is None: + raise LibraryError(reports.cib_load_error_invalid_format()) + + regexp = re.compile( + r"pacemaker-(?P\d+)\.(?P\d+)(\.(?P\d+))?" + ) + match = regexp.match(version) + if not match: + raise LibraryError(reports.cib_load_error_invalid_format()) + return ( + int(match.group("major")), + int(match.group("minor")), + int(match.group("rev") or 0) + ) + + +def upgrade_cib(cib, runner): + """ + Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded + CIB as string. + Raises LibraryError on any failure. 
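# Illustrative sketch (not part of the upstream patch): the "validate-with"
# parsing done by get_pacemaker_version_by_which_cib_was_validated() above,
# applied to sample values. The named groups are reconstructed here from the
# match.group() calls, since the group names are stripped in this rendering
# of the patch.
import re

VALIDATE_WITH = re.compile(
    r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
)

def parse_validate_with(value):
    match = VALIDATE_WITH.match(value)
    if not match:
        raise ValueError("unrecognized validate-with value: %s" % value)
    return (
        int(match.group("major")),
        int(match.group("minor")),
        int(match.group("rev") or 0),
    )

print(parse_validate_with("pacemaker-2.5"))    # (2, 5, 0)
print(parse_validate_with("pacemaker-1.2.3"))  # (1, 2, 3)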
+ + cib -- cib etree + runner -- CommandRunner + """ + temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs") + temp_file.write(etree.tostring(cib).decode()) + temp_file.flush() + output, retval = runner.run( + [ + os.path.join(settings.pacemaker_binaries, "cibadmin"), + "--upgrade", + "--force" + ], + env_extend={"CIB_file": temp_file.name} + ) + + if retval != 0: + temp_file.close() + LibraryError(reports.cib_upgrade_failed(output)) + + try: + temp_file.seek(0) + return etree.fromstring(temp_file.read()) + except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e: + LibraryError(reports.cib_upgrade_failed(str(e))) + finally: + temp_file.close() + + +def ensure_cib_version(runner, cib, version): + """ + This method ensures that specified cib is verified by pacemaker with + version 'version' or newer. If cib doesn't correspond to this version, + method will try to upgrade cib. + Returns cib which was verified by pacemaker version 'version' or later. + Raises LibraryError on any failure. + + runner -- CommandRunner + cib -- cib tree + version -- tuple of integers (, , ) + """ + current_version = get_pacemaker_version_by_which_cib_was_validated( + cib + ) + if current_version >= version: + return None + + upgraded_cib = upgrade_cib(cib, runner) + current_version = get_pacemaker_version_by_which_cib_was_validated( + upgraded_cib + ) + + if current_version >= version: + return upgraded_cib + + raise LibraryError(reports.unable_to_upgrade_cib_to_required_version( + current_version, version + )) diff -Nru pcs-0.9.151/pcs/lib/commands/alert.py pcs-0.9.153/pcs/lib/commands/alert.py --- pcs-0.9.151/pcs/lib/commands/alert.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/alert.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,169 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +from pcs.lib import reports +from pcs.lib.cib import alert +from pcs.lib.errors import LibraryError + + +REQUIRED_CIB_VERSION = (2, 5, 0) + + +def create_alert( + lib_env, + alert_id, + path, + instance_attribute_dict, + meta_attribute_dict, + description=None +): + """ + Create new alert. + Raises LibraryError if path is not specified, or any other failure. + + lib_env -- LibraryEnvironment + alert_id -- id of alert to be created, if None it will be generated + path -- path to script for alert + instance_attribute_dict -- dictionary of instance attributes + meta_attribute_dict -- dictionary of meta attributes + description -- alert description description + """ + if not path: + raise LibraryError(reports.required_option_is_missing("path")) + + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + + alert_el = alert.create_alert(cib, alert_id, path, description) + alert.update_instance_attributes(cib, alert_el, instance_attribute_dict) + alert.update_meta_attributes(cib, alert_el, meta_attribute_dict) + + lib_env.push_cib(cib) + + +def update_alert( + lib_env, + alert_id, + path, + instance_attribute_dict, + meta_attribute_dict, + description=None +): + """ + Update existing alert with specified id. 
+ + lib_env -- LibraryEnvironment + alert_id -- id of alert to be updated + path -- new path, if None old value will stay unchanged + instance_attribute_dict -- dictionary of instance attributes to update + meta_attribute_dict -- dictionary of meta attributes to update + description -- new description, if empty string, old description will be + deleted, if None old value will stay unchanged + """ + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + + alert_el = alert.update_alert(cib, alert_id, path, description) + alert.update_instance_attributes(cib, alert_el, instance_attribute_dict) + alert.update_meta_attributes(cib, alert_el, meta_attribute_dict) + + lib_env.push_cib(cib) + + +def remove_alert(lib_env, alert_id): + """ + Remove alert with specified id. + + lib_env -- LibraryEnvironment + alert_id -- id of alert which should be removed + """ + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + alert.remove_alert(cib, alert_id) + lib_env.push_cib(cib) + + +def add_recipient( + lib_env, + alert_id, + recipient_value, + instance_attribute_dict, + meta_attribute_dict, + description=None +): + """ + Add new recipient to alert witch id alert_id. + + lib_env -- LibraryEnvironment + alert_id -- id of alert to which new recipient should be added + recipient_value -- value of new recipient + instance_attribute_dict -- dictionary of instance attributes to update + meta_attribute_dict -- dictionary of meta attributes to update + description -- recipient description + """ + if not recipient_value: + raise LibraryError( + reports.required_option_is_missing("value") + ) + + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + recipient = alert.add_recipient( + cib, alert_id, recipient_value, description + ) + alert.update_instance_attributes(cib, recipient, instance_attribute_dict) + alert.update_meta_attributes(cib, recipient, meta_attribute_dict) + + lib_env.push_cib(cib) + + +def update_recipient( + lib_env, + alert_id, + recipient_value, + instance_attribute_dict, + meta_attribute_dict, + description=None +): + """ + Update existing recipient. + + lib_env -- LibraryEnvironment + alert_id -- id of alert to which recipient belong + recipient_value -- recipient to be updated + instance_attribute_dict -- dictionary of instance attributes to update + meta_attribute_dict -- dictionary of meta attributes to update + description -- new description, if empty string, old description will be + deleted, if None old value will stay unchanged + """ + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + recipient = alert.update_recipient( + cib, alert_id, recipient_value, description + ) + alert.update_instance_attributes(cib, recipient, instance_attribute_dict) + alert.update_meta_attributes(cib, recipient, meta_attribute_dict) + + lib_env.push_cib(cib) + + +def remove_recipient(lib_env, alert_id, recipient_value): + """ + Remove existing recipient. + + lib_env -- LibraryEnvironment + alert_id -- id of alert to which recipient belong + recipient_value -- recipient to be removed + """ + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + alert.remove_recipient(cib, alert_id, recipient_value) + lib_env.push_cib(cib) + + +def get_all_alerts(lib_env): + """ + Returns list of all alerts. See docs of pcs.lib.cib.alert.get_all_alerts for + description of data format. 
+ + lib_env -- LibraryEnvironment + """ + return alert.get_all_alerts(lib_env.get_cib()) diff -Nru pcs-0.9.151/pcs/lib/commands/qdevice.py pcs-0.9.153/pcs/lib/commands/qdevice.py --- pcs-0.9.151/pcs/lib/commands/qdevice.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/qdevice.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,238 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import base64 +import binascii + +from pcs.lib import external, reports +from pcs.lib.corosync import qdevice_net +from pcs.lib.errors import LibraryError + + +def qdevice_setup(lib_env, model, enable, start): + """ + Initialize qdevice on local host with specified model + string model qdevice model to initialize + bool enable make qdevice service start on boot + bool start start qdevice now + """ + _ensure_not_cman(lib_env) + _check_model(model) + qdevice_net.qdevice_setup(lib_env.cmd_runner()) + lib_env.report_processor.process( + reports.qdevice_initialization_success(model) + ) + if enable: + _service_enable(lib_env, qdevice_net.qdevice_enable) + if start: + _service_start(lib_env, qdevice_net.qdevice_start) + +def qdevice_destroy(lib_env, model): + """ + Stop and disable qdevice on local host and remove its configuration + string model qdevice model to destroy + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_stop(lib_env, qdevice_net.qdevice_stop) + _service_disable(lib_env, qdevice_net.qdevice_disable) + qdevice_net.qdevice_destroy() + lib_env.report_processor.process(reports.qdevice_destroy_success(model)) + +def qdevice_status_text(lib_env, model, verbose=False, cluster=None): + """ + Get runtime status of a quorum device in plain text + string model qdevice model to query + bool verbose get more detailed output + string cluster show information only about specified cluster + """ + _ensure_not_cman(lib_env) + _check_model(model) + runner = lib_env.cmd_runner() + return ( + qdevice_net.qdevice_status_generic_text(runner, verbose) + + + qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose) + ) + +def qdevice_enable(lib_env, model): + """ + make qdevice start automatically on boot on local host + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_enable(lib_env, qdevice_net.qdevice_enable) + +def qdevice_disable(lib_env, model): + """ + make qdevice not start automatically on boot on local host + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_disable(lib_env, qdevice_net.qdevice_disable) + +def qdevice_start(lib_env, model): + """ + start qdevice now on local host + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_start(lib_env, qdevice_net.qdevice_start) + +def qdevice_stop(lib_env, model): + """ + stop qdevice now on local host + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_stop(lib_env, qdevice_net.qdevice_stop) + +def qdevice_kill(lib_env, model): + """ + kill qdevice now on local host + """ + _ensure_not_cman(lib_env) + _check_model(model) + _service_kill(lib_env, qdevice_net.qdevice_kill) + +def qdevice_net_sign_certificate_request( + lib_env, certificate_request, cluster_name +): + """ + Sign node certificate request by qnetd CA + string certificate_request base64 encoded certificate request + string cluster_name name of the cluster to which qdevice is being added + """ + _ensure_not_cman(lib_env) + try: + certificate_request_data = base64.b64decode(certificate_request) + except (TypeError, binascii.Error): + raise 
LibraryError(reports.invalid_option_value( + "qnetd certificate request", + certificate_request, + ["base64 encoded certificate"] + )) + return base64.b64encode( + qdevice_net.qdevice_sign_certificate_request( + lib_env.cmd_runner(), + certificate_request_data, + cluster_name + ) + ) + +def client_net_setup(lib_env, ca_certificate): + """ + Intialize qdevice net client on local host + ca_certificate base64 encoded qnetd CA certificate + """ + _ensure_not_cman(lib_env) + try: + ca_certificate_data = base64.b64decode(ca_certificate) + except (TypeError, binascii.Error): + raise LibraryError(reports.invalid_option_value( + "qnetd CA certificate", + ca_certificate, + ["base64 encoded certificate"] + )) + qdevice_net.client_setup(lib_env.cmd_runner(), ca_certificate_data) + +def client_net_import_certificate(lib_env, certificate): + """ + Import qnetd client certificate to local node certificate storage + certificate base64 encoded qnetd client certificate + """ + _ensure_not_cman(lib_env) + try: + certificate_data = base64.b64decode(certificate) + except (TypeError, binascii.Error): + raise LibraryError(reports.invalid_option_value( + "qnetd client certificate", + certificate, + ["base64 encoded certificate"] + )) + qdevice_net.client_import_certificate_and_key( + lib_env.cmd_runner(), + certificate_data + ) + +def client_net_destroy(lib_env): + """ + delete qdevice client config files on local host + """ + _ensure_not_cman(lib_env) + qdevice_net.client_destroy() + +def _ensure_not_cman(lib_env): + if lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) + +def _check_model(model): + if model != "net": + raise LibraryError( + reports.invalid_option_value("model", model, ["net"]) + ) + +def _service_start(lib_env, func): + lib_env.report_processor.process( + reports.service_start_started("quorum device") + ) + try: + func(lib_env.cmd_runner()) + except external.StartServiceError as e: + raise LibraryError( + reports.service_start_error(e.service, e.message) + ) + lib_env.report_processor.process( + reports.service_start_success("quorum device") + ) + +def _service_stop(lib_env, func): + lib_env.report_processor.process( + reports.service_stop_started("quorum device") + ) + try: + func(lib_env.cmd_runner()) + except external.StopServiceError as e: + raise LibraryError( + reports.service_stop_error(e.service, e.message) + ) + lib_env.report_processor.process( + reports.service_stop_success("quorum device") + ) + +def _service_kill(lib_env, func): + try: + func(lib_env.cmd_runner()) + except external.KillServicesError as e: + raise LibraryError( + reports.service_kill_error(e.service, e.message) + ) + lib_env.report_processor.process( + reports.service_kill_success(["quorum device"]) + ) + +def _service_enable(lib_env, func): + try: + func(lib_env.cmd_runner()) + except external.EnableServiceError as e: + raise LibraryError( + reports.service_enable_error(e.service, e.message) + ) + lib_env.report_processor.process( + reports.service_enable_success("quorum device") + ) + +def _service_disable(lib_env, func): + try: + func(lib_env.cmd_runner()) + except external.DisableServiceError as e: + raise LibraryError( + reports.service_disable_error(e.service, e.message) + ) + lib_env.report_processor.process( + reports.service_disable_success("quorum device") + ) diff -Nru pcs-0.9.151/pcs/lib/commands/quorum.py pcs-0.9.153/pcs/lib/commands/quorum.py --- pcs-0.9.151/pcs/lib/commands/quorum.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/quorum.py 
2016-07-01 13:31:39.000000000 +0000 @@ -5,9 +5,18 @@ unicode_literals, ) - from pcs.lib import reports from pcs.lib.errors import LibraryError +from pcs.lib.corosync import ( + live as corosync_live, + qdevice_net, + qdevice_client +) +from pcs.lib.external import ( + NodeCommunicationException, + node_communicator_exception_to_report_item, + parallel_nodes_communication_helper, +) def get_config(lib_env): @@ -42,6 +51,21 @@ cfg.set_quorum_options(lib_env.report_processor, options) lib_env.push_corosync_conf(cfg, skip_offline_nodes) +def status_text(lib_env): + """ + Get quorum runtime status in plain text + """ + __ensure_not_cman(lib_env) + return corosync_live.get_quorum_status_text(lib_env.cmd_runner()) + +def status_device_text(lib_env, verbose=False): + """ + Get quorum device client runtime status in plain text + bool verbose get more detailed output + """ + __ensure_not_cman(lib_env) + return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose) + def add_device( lib_env, model, model_options, generic_options, force_model=False, force_options=False, skip_offline_nodes=False @@ -58,6 +82,8 @@ __ensure_not_cman(lib_env) cfg = lib_env.get_corosync_conf() + # Try adding qdevice to corosync.conf. This validates all the options and + # makes sure qdevice is not defined in corosync.conf yet. cfg.add_quorum_device( lib_env.report_processor, model, @@ -66,9 +92,131 @@ force_model, force_options ) - # TODO validation, verification, certificates, etc. + + # First setup certificates for qdevice, then send corosync.conf to nodes. + # If anything fails, nodes will not have corosync.conf with qdevice in it, + # so there is no effect on the cluster. + if lib_env.is_corosync_conf_live: + # do model specific configuration + # if model is not known to pcs and was forced, do not configure antyhing + # else but corosync.conf, as we do not know what to do anyways + if model == "net": + _add_device_model_net( + lib_env, + # we are sure it's there, it was validated in add_quorum_device + model_options["host"], + cfg.get_cluster_name(), + cfg.get_nodes(), + skip_offline_nodes + ) + + lib_env.report_processor.process( + reports.service_enable_started("corosync-qdevice") + ) + communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_enable, + [ + [(lib_env.report_processor, communicator, node), {}] + for node in cfg.get_nodes() + ], + lib_env.report_processor, + skip_offline_nodes + ) + + # everything set up, it's safe to tell the nodes to use qdevice lib_env.push_corosync_conf(cfg, skip_offline_nodes) + # Now, when corosync.conf has been reloaded, we can start qdevice service. 
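# Illustrative sketch (not part of the upstream patch): the per-node calls in
# this hunk are fanned out as a list of (args, kwargs) pairs, the same shape
# consumed by run_parallel() added to pcs/common/tools.py earlier in this
# diff. The worker below is a made-up stand-in for the real remote_client_*
# helpers.
import threading

def run_parallel(worker, data_list):
    # same logic as pcs.common.tools.run_parallel added by this version
    thread_list = []
    for args, kwargs in data_list:
        thread = threading.Thread(target=worker, args=args, kwargs=kwargs)
        thread.daemon = True
        thread_list.append(thread)
        thread.start()
    for thread in thread_list:
        thread.join()

def start_qdevice(node, skip_offline=False):
    print("starting corosync-qdevice on %s (skip_offline=%s)" % (node, skip_offline))

run_parallel(
    start_qdevice,
    [(("node1",), {"skip_offline": True}), (("node2",), {})],
)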
+ if lib_env.is_corosync_conf_live: + lib_env.report_processor.process( + reports.service_start_started("corosync-qdevice") + ) + communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_start, + [ + [(lib_env.report_processor, communicator, node), {}] + for node in cfg.get_nodes() + ], + lib_env.report_processor, + skip_offline_nodes + ) + +def _add_device_model_net( + lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes +): + """ + setup cluster nodes for using qdevice model net + string qnetd_host address of qdevice provider (qnetd host) + string cluster_name name of the cluster to which qdevice is being added + NodeAddressesList cluster_nodes list of cluster nodes addresses + bool skip_offline_nodes continue even if not all nodes are accessible + """ + communicator = lib_env.node_communicator() + runner = lib_env.cmd_runner() + reporter = lib_env.report_processor + + reporter.process( + reports.qdevice_certificate_distribution_started() + ) + # get qnetd CA certificate + try: + qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate( + communicator, + qnetd_host + ) + except NodeCommunicationException as e: + raise LibraryError( + node_communicator_exception_to_report_item(e) + ) + # init certificate storage on all nodes + parallel_nodes_communication_helper( + qdevice_net.remote_client_setup, + [ + ((communicator, node, qnetd_ca_cert), {}) + for node in cluster_nodes + ], + reporter, + skip_offline_nodes + ) + # create client certificate request + cert_request = qdevice_net.client_generate_certificate_request( + runner, + cluster_name + ) + # sign the request on qnetd host + try: + signed_certificate = qdevice_net.remote_sign_certificate_request( + communicator, + qnetd_host, + cert_request, + cluster_name + ) + except NodeCommunicationException as e: + raise LibraryError( + node_communicator_exception_to_report_item(e) + ) + # transform the signed certificate to pk12 format which can sent to nodes + pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate) + # distribute final certificate to nodes + def do_and_report(reporter, communicator, node, pk12): + qdevice_net.remote_client_import_certificate_and_key( + communicator, node, pk12 + ) + reporter.process( + reports.qdevice_certificate_accepted_by_node(node.label) + ) + parallel_nodes_communication_helper( + do_and_report, + [ + ((reporter, communicator, node, pk12), {}) + for node in cluster_nodes + ], + reporter, + skip_offline_nodes + ) + def update_device( lib_env, model_options, generic_options, force_options=False, skip_offline_nodes=False @@ -98,9 +246,95 @@ __ensure_not_cman(lib_env) cfg = lib_env.get_corosync_conf() + model, dummy_options, dummy_options = cfg.get_quorum_device_settings() cfg.remove_quorum_device() lib_env.push_corosync_conf(cfg, skip_offline_nodes) + if lib_env.is_corosync_conf_live: + # disable qdevice + lib_env.report_processor.process( + reports.service_disable_started("corosync-qdevice") + ) + communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_disable, + [ + [(lib_env.report_processor, communicator, node), {}] + for node in cfg.get_nodes() + ], + lib_env.report_processor, + skip_offline_nodes + ) + # stop qdevice + lib_env.report_processor.process( + reports.service_stop_started("corosync-qdevice") + ) + communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_stop, + [ + 
[(lib_env.report_processor, communicator, node), {}] + for node in cfg.get_nodes() + ], + lib_env.report_processor, + skip_offline_nodes + ) + # handle model specific configuration + if model == "net": + _remove_device_model_net( + lib_env, + cfg.get_nodes(), + skip_offline_nodes + ) + +def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes): + """ + remove configuration used by qdevice model net + NodeAddressesList cluster_nodes list of cluster nodes addresses + bool skip_offline_nodes continue even if not all nodes are accessible + """ + reporter = lib_env.report_processor + communicator = lib_env.node_communicator() + + reporter.process( + reports.qdevice_certificate_removal_started() + ) + def do_and_report(reporter, communicator, node): + qdevice_net.remote_client_destroy(communicator, node) + reporter.process( + reports.qdevice_certificate_removed_from_node(node.label) + ) + parallel_nodes_communication_helper( + do_and_report, + [ + [(reporter, communicator, node), {}] + for node in cluster_nodes + ], + lib_env.report_processor, + skip_offline_nodes + ) + +def set_expected_votes_live(lib_env, expected_votes): + """ + set expected votes in live cluster to specified value + numeric expected_votes desired value of expected votes + """ + if lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) + + try: + votes_int = int(expected_votes) + if votes_int < 1: + raise ValueError() + except ValueError: + raise LibraryError(reports.invalid_option_value( + "expected votes", + expected_votes, + "positive integer" + )) + + corosync_live.set_expected_votes(lib_env.cmd_runner(), votes_int) + def __ensure_not_cman(lib_env): if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster: raise LibraryError(reports.cman_unsupported_command()) diff -Nru pcs-0.9.151/pcs/lib/commands/sbd.py pcs-0.9.153/pcs/lib/commands/sbd.py --- pcs-0.9.151/pcs/lib/commands/sbd.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/sbd.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,355 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import json + +from pcs import settings +from pcs.common import ( + tools, + report_codes, +) +from pcs.lib import ( + sbd, + reports, + nodes_task, +) +from pcs.lib.tools import environment_file_to_dict +from pcs.lib.errors import ( + LibraryError, + ReportItemSeverity as Severities +) +from pcs.lib.external import ( + node_communicator_exception_to_report_item, + NodeCommunicationException, + NodeConnectionException, + NodeCommandUnsuccessfulException, +) +from pcs.lib.node import ( + NodeAddressesList, + NodeNotFound +) + + +def _validate_sbd_options(sbd_config, allow_unknown_opts=False): + """ + Validate user SBD configuration. Options 'SBD_WATCHDOG_DEV' and 'SBD_OPTS' + are restricted. Returns list of ReportItem + + sbd_config -- dictionary in format: : + allow_unknown_opts -- if True, accept also unknown options. 
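# Illustrative sketch (not part of the upstream patch): a rough standalone
# restatement of the option screening described above and implemented just
# below. The real code returns ReportItem objects built via pcs.lib.reports
# rather than plain strings.
UNSUPPORTED_SBD_OPTIONS = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
ALLOWED_SBD_OPTIONS = [
    "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
]

def screen_sbd_options(sbd_config, allow_unknown_opts=False):
    problems = []
    for opt in sbd_config:
        if opt in UNSUPPORTED_SBD_OPTIONS:
            problems.append("option %s is not supported" % opt)
        elif opt not in ALLOWED_SBD_OPTIONS and not allow_unknown_opts:
            problems.append("unknown option %s" % opt)
    return problems

print(screen_sbd_options({"SBD_OPTS": "-W", "SBD_STARTMODE": "clean"}))
# ['option SBD_OPTS is not supported']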
+ """ + + report_item_list = [] + unsupported_sbd_option_list = ["SBD_WATCHDOG_DEV", "SBD_OPTS"] + allowed_sbd_options = [ + "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT" + ] + for sbd_opt in sbd_config: + if sbd_opt in unsupported_sbd_option_list: + report_item_list.append(reports.invalid_option( + sbd_opt, allowed_sbd_options, None + )) + + elif sbd_opt not in allowed_sbd_options: + report_item_list.append(reports.invalid_option( + sbd_opt, + allowed_sbd_options, + None, + Severities.WARNING if allow_unknown_opts else Severities.ERROR, + None if allow_unknown_opts else report_codes.FORCE_OPTIONS + )) + + return report_item_list + + +def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict): + """ + Validate if all nodes in watchdog_dict does exist and returns dictionary + where keys are nodes and value is corresponding watchdog. + Raises LibraryError if any of nodes doesn't belong to cluster. + + node_list -- NodeAddressesList + default_watchdog -- watchdog for nodes which are not specified + in watchdog_dict + watchdog_dict -- dictionary with node names as keys and value as watchdog + """ + full_dict = dict([(node, default_watchdog) for node in node_list]) + report_item_list = [] + + for node_name, watchdog in watchdog_dict.items(): + try: + full_dict[node_list.find_by_label(node_name)] = watchdog + except NodeNotFound: + report_item_list.append(reports.node_not_found(node_name)) + + if report_item_list: + raise LibraryError(*report_item_list) + + return full_dict + + +def enable_sbd( + lib_env, default_watchdog, watchdog_dict, sbd_options, + allow_unknown_opts=False, ignore_offline_nodes=False +): + """ + Enable SBD on all nodes in cluster. + + lib_env -- LibraryEnvironment + default_watchdog -- watchdog for nodes which are not specified in + watchdog_dict. Uses default value from settings if None. + watchdog_dict -- dictionary with NodeAddresses as keys and watchdog path + as value + sbd_options -- dictionary in format: : + allow_unknown_opts -- if True, accept also unknown options. 
+ ignore_offline_nodes -- if True, omit offline nodes + """ + __ensure_not_cman(lib_env) + + node_list = _get_cluster_nodes(lib_env) + + if not default_watchdog: + default_watchdog = settings.sbd_watchdog_default + + # input validation begin + full_watchdog_dict = _get_full_watchdog_list( + node_list, default_watchdog, watchdog_dict + ) + + # config validation + sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()]) + lib_env.report_processor.process_list( + _validate_sbd_options(sbd_options, allow_unknown_opts) + ) + + # check nodes status + online_nodes = _get_online_nodes(lib_env, node_list, ignore_offline_nodes) + for node in list(full_watchdog_dict): + if node not in online_nodes: + full_watchdog_dict.pop(node, None) + # input validation end + + # check if SBD can be enabled + sbd.check_sbd_on_all_nodes( + lib_env.report_processor, + lib_env.node_communicator(), + full_watchdog_dict + ) + + # distribute SBD configuration + config = sbd.get_default_sbd_config() + config.update(sbd_options) + sbd.set_sbd_config_on_all_nodes( + lib_env.report_processor, + lib_env.node_communicator(), + online_nodes, + config + ) + + # remove cluster prop 'stonith_watchdog_timeout' + sbd.remove_stonith_watchdog_timeout_on_all_nodes( + lib_env.node_communicator(), online_nodes + ) + + # enable SBD service an all nodes + sbd.enable_sbd_service_on_all_nodes( + lib_env.report_processor, lib_env.node_communicator(), online_nodes + ) + + lib_env.report_processor.process( + reports.cluster_restart_required_to_apply_changes() + ) + + +def disable_sbd(lib_env, ignore_offline_nodes=False): + """ + Disable SBD on all nodes in cluster. + + lib_env -- LibraryEnvironment + ignore_offline_nodes -- if True, omit offline nodes + """ + __ensure_not_cman(lib_env) + + node_list = _get_online_nodes( + lib_env, _get_cluster_nodes(lib_env), ignore_offline_nodes + ) + + sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes( + lib_env.node_communicator(), node_list + ) + sbd.disable_sbd_service_on_all_nodes( + lib_env.report_processor, + lib_env.node_communicator(), + node_list + ) + + lib_env.report_processor.process( + reports.cluster_restart_required_to_apply_changes() + ) + + +def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False): + """ + Returns NodeAddressesList of online nodes. + Raises LibraryError on any failure. + + lib_env -- LibraryEnvironment + node_list -- NodeAddressesList + ignore_offline_nodes -- if True offline nodes are just omitted from + returned list. + """ + to_raise = [] + online_node_list = NodeAddressesList() + + def is_node_online(node): + try: + nodes_task.node_check_auth(lib_env.node_communicator(), node) + online_node_list.append(node) + except NodeConnectionException as e: + if ignore_offline_nodes: + to_raise.append(reports.omitting_node(node.label)) + else: + to_raise.append(node_communicator_exception_to_report_item( + e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES + )) + except NodeCommunicationException as e: + to_raise.append(node_communicator_exception_to_report_item(e)) + + tools.run_parallel(is_node_online, [([node], {}) for node in node_list]) + + lib_env.report_processor.process_list(to_raise) + return online_node_list + + +def get_cluster_sbd_status(lib_env): + """ + Returns status of SBD service in cluster in dictionary with format: + { + : { + "installed": , + "enabled": , + "running": + }, + ... 
+ } + + lib_env -- LibraryEnvironment + """ + __ensure_not_cman(lib_env) + + node_list = _get_cluster_nodes(lib_env) + report_item_list = [] + successful_node_list = [] + status_list = [] + + def get_sbd_status(node): + try: + status_list.append({ + "node": node, + "status": json.loads( + sbd.check_sbd(lib_env.node_communicator(), node, "") + )["sbd"] + }) + successful_node_list.append(node) + except NodeCommunicationException as e: + report_item_list.append(reports.unable_to_get_sbd_status( + node.label, + node_communicator_exception_to_report_item(e).message + )) + except (ValueError, KeyError) as e: + report_item_list.append(reports.unable_to_get_sbd_status( + node.label, str(e) + )) + + tools.run_parallel(get_sbd_status, [([node], {}) for node in node_list]) + lib_env.report_processor.process_list(report_item_list) + + for node in node_list: + if node not in successful_node_list: + status_list.append({ + "node": node, + "status": { + "installed": None, + "enabled": None, + "running": None + } + }) + return status_list + + +def get_cluster_sbd_config(lib_env): + """ + Returns list of SBD config from all cluster nodes in cluster. Structure + of data: + [ + { + "node": + "config": or None if there was failure, + }, + ... + ] + If error occurs while obtaining config from some node, it's config will be + None. If obtaining config fail on all node returns empty dictionary. + + lib_env -- LibraryEnvironment + """ + __ensure_not_cman(lib_env) + + node_list = _get_cluster_nodes(lib_env) + config_list = [] + successful_node_list = [] + report_item_list = [] + + def get_sbd_config(node): + try: + config_list.append({ + "node": node, + "config": environment_file_to_dict( + sbd.get_sbd_config(lib_env.node_communicator(), node) + ) + }) + successful_node_list.append(node) + except NodeCommandUnsuccessfulException as e: + report_item_list.append(reports.unable_to_get_sbd_config( + node.label, + e.reason, + Severities.WARNING + )) + except NodeCommunicationException as e: + report_item_list.append(reports.unable_to_get_sbd_config( + node.label, + node_communicator_exception_to_report_item(e).message, + Severities.WARNING + )) + + tools.run_parallel(get_sbd_config, [([node], {}) for node in node_list]) + lib_env.report_processor.process_list(report_item_list) + + if not len(config_list): + return [] + + for node in node_list: + if node not in successful_node_list: + config_list.append({ + "node": node, + "config": None + }) + return config_list + + +def get_local_sbd_config(lib_env): + __ensure_not_cman(lib_env) + return environment_file_to_dict(sbd.get_local_sbd_config()) + + +def _get_cluster_nodes(lib_env): + return lib_env.get_corosync_conf().get_nodes() + + +def __ensure_not_cman(lib_env): + if lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) diff -Nru pcs-0.9.151/pcs/lib/commands/test/test_alert.py pcs-0.9.153/pcs/lib/commands/test/test_alert.py --- pcs-0.9.151/pcs/lib/commands/test/test_alert.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/test/test_alert.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,639 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import logging +from lxml import etree + +from unittest import TestCase + +from pcs.test.tools.pcs_mock import mock +from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_xml_equal, +) +from pcs.test.tools.custom_mock import MockLibraryReportProcessor + +from pcs.common import report_codes +from 
pcs.lib.errors import ReportItemSeverity as Severities +from pcs.lib.env import LibraryEnvironment +from pcs.lib.external import CommandRunner + +import pcs.lib.commands.alert as cmd_alert + + +@mock.patch("pcs.lib.cib.tools.upgrade_cib") +class CreateAlertTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data="" + ) + + def test_no_path(self, mock_upgrade_cib): + assert_raise_library_error( + lambda: cmd_alert.create_alert( + self.mock_env, None, None, None, None + ), + ( + Severities.ERROR, + report_codes.REQUIRED_OPTION_IS_MISSING, + {"option_name": "path"} + ) + ) + self.assertEqual(0, mock_upgrade_cib.call_count) + + def test_upgrade_needed(self, mock_upgrade_cib): + self.mock_env._push_cib_xml( + """ + + + + + """ + ) + mock_upgrade_cib.return_value = etree.XML( + """ + + + + + """ + ) + cmd_alert.create_alert( + self.mock_env, + "my-alert", + "/my/path", + { + "instance": "value", + "another": "val" + }, + {"meta1": "val1"}, + "my description" + ) + assert_xml_equal( + """ + + + + + + + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + self.assertEqual(1, mock_upgrade_cib.call_count) + + +class UpdateAlertTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data="" + ) + + def test_update_all(self): + self.mock_env._push_cib_xml( + """ + + + + + + + + + + + + + + + + """ + ) + cmd_alert.update_alert( + self.mock_env, + "my-alert", + "/another/one", + { + "instance": "", + "my-attr": "its_val" + }, + {"meta1": "val2"}, + "" + ) + assert_xml_equal( + """ + + + + + + + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + def test_update_instance_attribute(self): + self.mock_env._push_cib_xml( + """ + + + + + + + + + + + + """ + ) + cmd_alert.update_alert( + self.mock_env, + "my-alert", + None, + {"instance": "new_val"}, + {}, + None + ) + assert_xml_equal( + """ + + + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + def test_alert_doesnt_exist(self): + self.mock_env._push_cib_xml( + """ + + + + + + + + """ + ) + assert_raise_library_error( + lambda: cmd_alert.update_alert( + self.mock_env, "unknown", "test", {}, {}, None + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "unknown"} + ) + ) + + +class RemoveAlertTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + cib = """ + + + + + + + + + """ + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data=cib + ) + + def test_success(self): + cmd_alert.remove_alert(self.mock_env, "alert") + assert_xml_equal( + """ + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + def test_not_existing_alert(self): + assert_raise_library_error( + lambda: cmd_alert.remove_alert(self.mock_env, "unknown"), + ( + Severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "unknown"} + ) + ) + + +class AddRecipientTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = 
MockLibraryReportProcessor() + cib = """ + + + + + + + + + + """ + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data=cib + ) + + def test_alert_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.add_recipient( + self.mock_env, "unknown", "recipient", {}, {} + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "unknown"} + ) + ) + + def test_value_not_defined(self): + assert_raise_library_error( + lambda: cmd_alert.add_recipient( + self.mock_env, "unknown", "", {}, {} + ), + ( + Severities.ERROR, + report_codes.REQUIRED_OPTION_IS_MISSING, + {"option_name": "value"} + ) + ) + + def test_recipient_already_exists(self): + assert_raise_library_error( + lambda: cmd_alert.add_recipient( + self.mock_env, "alert", "value1", {}, {} + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, + { + "recipient": "value1", + "alert": "alert" + } + ) + ) + + def test_success(self): + cmd_alert.add_recipient( + self.mock_env, + "alert", + "value", + {"attr1": "val1"}, + { + "attr2": "val2", + "attr1": "val1" + } + ) + assert_xml_equal( + """ + + + + + + + + + + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + +class UpdateRecipientTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + cib = """ + + + + + + + + + + + + + + + + + + + """ + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data=cib + ) + + def test_alert_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.update_recipient( + self.mock_env, "unknown", "recipient", {}, {} + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "unknown"} + ) + ) + + def test_recipient_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.update_recipient( + self.mock_env, "alert", "recipient", {}, {} + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "recipient": "recipient", + "alert": "alert" + } + ) + ) + + def test_update_all(self): + cmd_alert.update_recipient( + self.mock_env, + "alert", + "value", + {"attr1": "value"}, + { + "attr1": "", + "attr3": "new_val" + }, + "desc" + ) + assert_xml_equal( + """ + + + + + + + + + + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + +class RemoveRecipientTest(TestCase): + def setUp(self): + self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + cib = """ + + + + + + + + + + + """ + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data=cib + ) + + def test_alert_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.remove_recipient( + self.mock_env, "unknown", "recipient" + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, + {"alert": "unknown"} + ) + ) + + def test_recipient_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.remove_recipient( + self.mock_env, "alert", "recipient" + ), + ( + Severities.ERROR, + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + { + "recipient": "recipient", + "alert": "alert" + } + ) + ) + + def test_success(self): + cmd_alert.remove_recipient(self.mock_env, "alert", "value1") + assert_xml_equal( + """ + + + + + + + + + + """, + self.mock_env._get_cib_xml() + ) + + +@mock.patch("pcs.lib.cib.alert.get_all_alerts") +class GetAllAlertsTest(TestCase): + def setUp(self): + 
self.mock_log = mock.MagicMock(spec_set=logging.Logger) + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + self.mock_rep = MockLibraryReportProcessor() + self.mock_env = LibraryEnvironment( + self.mock_log, self.mock_rep, cib_data='' + ) + + def test_success(self, mock_alerts): + mock_alerts.return_value = [{"id": "alert"}] + self.assertEqual( + [{"id": "alert"}], + cmd_alert.get_all_alerts(self.mock_env) + ) + self.assertEqual(1, mock_alerts.call_count) diff -Nru pcs-0.9.151/pcs/lib/commands/test/test_ticket.py pcs-0.9.153/pcs/lib/commands/test/test_ticket.py --- pcs-0.9.151/pcs/lib/commands/test/test_ticket.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/commands/test/test_ticket.py 2016-07-01 13:31:39.000000000 +0000 @@ -44,7 +44,7 @@ }) assert_xml_equal( - env.get_cib_xml(), + env._get_cib_xml(), str(cib.append_to_first_tag_name( 'constraints', """ 2nodelms. We don't care about that, it's not really - # a change, as there was no qdevice before. So we override it. - self._need_stopped_cluster = need_stopped_cluster def update_quorum_device( self, report_processor, model_options, generic_options, @@ -281,9 +318,10 @@ model_sections.extend(device.get_sections(model)) self.__set_section_options(device_sections, generic_options) self.__set_section_options(model_sections, model_options) + self.__update_qdevice_votes() self.__update_two_node() self.__remove_empty_sections(self.config) - self._need_stopped_cluster = True + self._need_qdevice_reload = True def remove_quorum_device(self): """ @@ -329,9 +367,8 @@ def __validate_quorum_device_model_net_options( self, model_options, need_required, force=False ): - required_options = frozenset(["host"]) + required_options = frozenset(["host", "algorithm"]) optional_options = frozenset([ - "algorithm", "connect_timeout", "force_ip_version", "port", @@ -370,7 +407,7 @@ continue if name == "algorithm": - allowed_values = ("2nodelms", "ffsplit", "lms") + allowed_values = ("ffsplit", "lms") if value not in allowed_values: report_items.append(reports.invalid_option_value( name, value, allowed_values, severity, forceable @@ -462,19 +499,29 @@ else: for quorum in self.config.get_sections("quorum"): quorum.del_attributes_by_name("two_node") - # update qdevice algorithm "lms" vs "2nodelms" + + def __update_qdevice_votes(self): + # ffsplit won't start if votes is missing or not set to 1 + # for other algorithms it's required not to put votes at all + model = None + algorithm = None + device_sections = [] for quorum in self.config.get_sections("quorum"): for device in quorum.get_sections("device"): - for net in device.get_sections("net"): - algorithm = None - for dummy_name, value in net.get_attributes("algorithm"): - algorithm = value - if algorithm == "lms" and has_two_nodes: - net.set_attribute("algorithm", "2nodelms") - self._need_stopped_cluster = True - elif algorithm == "2nodelms" and not has_two_nodes: - net.set_attribute("algorithm", "lms") - self._need_stopped_cluster = True + device_sections.append(device) + for dummy_name, value in device.get_attributes("model"): + model = value + for device in device_sections: + for model_section in device.get_sections(model): + for dummy_name, value in model_section.get_attributes( + "algorithm" + ): + algorithm = value + if model == "net": + if algorithm == "ffsplit": + self.__set_section_options(device_sections, {"votes": "1"}) + else: + self.__set_section_options(device_sections, {"votes": ""}) def __set_section_options(self, section_list, options): for section in section_list[:-1]: diff 
-Nru pcs-0.9.151/pcs/lib/corosync/live.py pcs-0.9.153/pcs/lib/corosync/live.py --- pcs-0.9.151/pcs/lib/corosync/live.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/corosync/live.py 2016-07-01 13:31:39.000000000 +0000 @@ -47,3 +47,33 @@ reports.corosync_config_reload_error(output.rstrip()) ) +def get_quorum_status_text(runner): + """ + Get runtime quorum status from the local node + """ + output, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync-quorumtool"), + "-p" + ]) + # retval is 0 on success if node is not in partition with quorum + # retval is 1 on error OR on success if node has quorum + if retval not in [0, 1]: + raise LibraryError( + reports.corosync_quorum_get_status_error(output) + ) + return output + +def set_expected_votes(runner, votes): + """ + set expected votes in live cluster to specified value + """ + output, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync-quorumtool"), + # format votes to handle the case where they are int + "-e", "{0}".format(votes) + ]) + if retval != 0: + raise LibraryError( + reports.corosync_quorum_set_expected_votes_error(output) + ) + return output diff -Nru pcs-0.9.151/pcs/lib/corosync/qdevice_client.py pcs-0.9.153/pcs/lib/corosync/qdevice_client.py --- pcs-0.9.151/pcs/lib/corosync/qdevice_client.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/corosync/qdevice_client.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,93 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import os.path + +from pcs import settings +from pcs.lib import reports +from pcs.lib.errors import LibraryError + + +def get_status_text(runner, verbose=False): + """ + Get quorum device client runtime status in plain text + bool verbose get more detailed output + """ + cmd = [ + os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"), + "-s" + ] + if verbose: + cmd.append("-v") + output, retval = runner.run(cmd) + if retval != 0: + raise LibraryError( + reports.corosync_quorum_get_status_error(output) + ) + return output + +def remote_client_enable(reporter, node_communicator, node): + """ + enable qdevice client service (corosync-qdevice) on a remote node + """ + response = node_communicator.call_node( + node, + "remote/qdevice_client_enable", + None + ) + if response == "corosync is not enabled, skipping": + reporter.process( + reports.service_enable_skipped( + "corosync-qdevice", + "corosync is not enabled", + node.label + ) + ) + else: + reporter.process( + reports.service_enable_success("corosync-qdevice", node.label) + ) + +def remote_client_disable(reporter, node_communicator, node): + """ + disable qdevice client service (corosync-qdevice) on a remote node + """ + node_communicator.call_node(node, "remote/qdevice_client_disable", None) + reporter.process( + reports.service_disable_success("corosync-qdevice", node.label) + ) + +def remote_client_start(reporter, node_communicator, node): + """ + start qdevice client service (corosync-qdevice) on a remote node + """ + response = node_communicator.call_node( + node, + "remote/qdevice_client_start", + None + ) + if response == "corosync is not running, skipping": + reporter.process( + reports.service_start_skipped( + "corosync-qdevice", + "corosync is not running", + node.label + ) + ) + else: + reporter.process( + reports.service_start_success("corosync-qdevice", node.label) + ) + +def remote_client_stop(reporter, node_communicator, node): + """ + stop qdevice client service 
(corosync-qdevice) on a remote node + """ + node_communicator.call_node(node, "remote/qdevice_client_stop", None) + reporter.process( + reports.service_stop_success("corosync-qdevice", node.label) + ) diff -Nru pcs-0.9.151/pcs/lib/corosync/qdevice_net.py pcs-0.9.153/pcs/lib/corosync/qdevice_net.py --- pcs-0.9.151/pcs/lib/corosync/qdevice_net.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/corosync/qdevice_net.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,382 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import base64 +import binascii +import functools +import os +import os.path +import re +import shutil +import tempfile + +from pcs import settings +from pcs.lib import external, reports +from pcs.lib.errors import LibraryError + + +__model = "net" +__service_name = "corosync-qnetd" +__qnetd_certutil = os.path.join( + settings.corosync_qnet_binaries, + "corosync-qnetd-certutil" +) +__qnetd_tool = os.path.join( + settings.corosync_qnet_binaries, + "corosync-qnetd-tool" +) +__qdevice_certutil = os.path.join( + settings.corosync_binaries, + "corosync-qdevice-net-certutil" +) + +def qdevice_setup(runner): + """ + initialize qdevice on local host + """ + if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir): + raise LibraryError(reports.qdevice_already_initialized(__model)) + + output, retval = runner.run([ + __qnetd_certutil, "-i" + ]) + if retval != 0: + raise LibraryError( + reports.qdevice_initialization_error(__model, output.rstrip()) + ) + +def qdevice_initialized(): + """ + check if qdevice server certificate database has been initialized + """ + return os.path.exists(os.path.join( + settings.corosync_qdevice_net_server_certs_dir, + "cert8.db" + )) + +def qdevice_destroy(): + """ + delete qdevice configuration on local host + """ + try: + if qdevice_initialized(): + shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir) + except EnvironmentError as e: + raise LibraryError( + reports.qdevice_destroy_error(__model, e.strerror) + ) + +def qdevice_status_generic_text(runner, verbose=False): + """ + get qdevice runtime status in plain text + bool verbose get more detailed output + """ + cmd = [__qnetd_tool, "-s"] + if verbose: + cmd.append("-v") + output, retval = runner.run(cmd) + if retval != 0: + raise LibraryError(reports.qdevice_get_status_error(__model, output)) + return output + +def qdevice_status_cluster_text(runner, cluster=None, verbose=False): + """ + get qdevice runtime status in plain text + bool verbose get more detailed output + string cluster show information only about specified cluster + """ + cmd = [__qnetd_tool, "-l"] + if verbose: + cmd.append("-v") + if cluster: + cmd.extend(["-c", cluster]) + output, retval = runner.run(cmd) + if retval != 0: + raise LibraryError(reports.qdevice_get_status_error(__model, output)) + return output + +def qdevice_enable(runner): + """ + make qdevice start automatically on boot on local host + """ + external.enable_service(runner, __service_name) + +def qdevice_disable(runner): + """ + make qdevice not start automatically on boot on local host + """ + external.disable_service(runner, __service_name) + +def qdevice_start(runner): + """ + start qdevice now on local host + """ + external.start_service(runner, __service_name) + +def qdevice_stop(runner): + """ + stop qdevice now on local host + """ + external.stop_service(runner, __service_name) + +def qdevice_kill(runner): + """ + kill qdevice now on local host + """ + 
external.kill_services(runner, [__service_name]) + +def qdevice_sign_certificate_request(runner, cert_request, cluster_name): + """ + sign client certificate request + cert_request certificate request data + string cluster_name name of the cluster to which qdevice is being added + """ + if not qdevice_initialized(): + raise LibraryError(reports.qdevice_not_initialized(__model)) + # save the certificate request, corosync tool only works with files + tmpfile = _store_to_tmpfile( + cert_request, + reports.qdevice_certificate_sign_error + ) + # sign the request + output, retval = runner.run([ + __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( + reports.qdevice_certificate_sign_error(output.strip()) + ) + # get signed certificate, corosync tool only works with files + return _get_output_certificate( + output, + reports.qdevice_certificate_sign_error + ) + +def client_setup(runner, ca_certificate): + """ + initialize qdevice client on local host + ca_certificate qnetd CA certificate + """ + client_destroy() + # save CA certificate, corosync tool only works with files + ca_file_path = os.path.join( + settings.corosync_qdevice_net_client_certs_dir, + settings.corosync_qdevice_net_client_ca_file_name + ) + try: + if not os.path.exists(ca_file_path): + os.makedirs( + settings.corosync_qdevice_net_client_certs_dir, + mode=0o700 + ) + with open(ca_file_path, "wb") as ca_file: + ca_file.write(ca_certificate) + except EnvironmentError as e: + raise LibraryError( + reports.qdevice_initialization_error(__model, e.strerror) + ) + # initialize client's certificate storage + output, retval = runner.run([ + __qdevice_certutil, "-i", "-c", ca_file_path + ]) + if retval != 0: + raise LibraryError( + reports.qdevice_initialization_error(__model, output.rstrip()) + ) + +def client_initialized(): + """ + check if qdevice net client certificate database has been initialized + """ + return os.path.exists(os.path.join( + settings.corosync_qdevice_net_client_certs_dir, + "cert8.db" + )) + +def client_destroy(): + """ + delete qdevice client config files on local host + """ + try: + if client_initialized(): + shutil.rmtree(settings.corosync_qdevice_net_client_certs_dir) + except EnvironmentError as e: + raise LibraryError( + reports.qdevice_destroy_error(__model, e.strerror) + ) + +def client_generate_certificate_request(runner, cluster_name): + """ + create a certificate request which can be signed by qnetd server + string cluster_name name of the cluster to which qdevice is being added + """ + if not client_initialized(): + raise LibraryError(reports.qdevice_not_initialized(__model)) + output, retval = runner.run([ + __qdevice_certutil, "-r", "-n", cluster_name + ]) + if retval != 0: + raise LibraryError( + reports.qdevice_initialization_error(__model, output.rstrip()) + ) + return _get_output_certificate( + output, + functools.partial(reports.qdevice_initialization_error, __model) + ) + +def client_cert_request_to_pk12(runner, cert_request): + """ + transform signed certificate request to pk12 certificate which can be + imported to nodes + cert_request signed certificate request + """ + if not client_initialized(): + raise LibraryError(reports.qdevice_not_initialized(__model)) + # save the signed certificate request, corosync tool only works with files + tmpfile = _store_to_tmpfile( + cert_request, + reports.qdevice_certificate_import_error + ) + # transform it + output, retval = runner.run([ + 
__qdevice_certutil, "-M", "-c", tmpfile.name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( + reports.qdevice_certificate_import_error(output) + ) + # get resulting pk12, corosync tool only works with files + return _get_output_certificate( + output, + reports.qdevice_certificate_import_error + ) + +def client_import_certificate_and_key(runner, pk12_certificate): + """ + import qdevice client certificate to the local node certificate storage + """ + if not client_initialized(): + raise LibraryError(reports.qdevice_not_initialized(__model)) + # save the certificate, corosync tool only works with files + tmpfile = _store_to_tmpfile( + pk12_certificate, + reports.qdevice_certificate_import_error + ) + output, retval = runner.run([ + __qdevice_certutil, "-m", "-c", tmpfile.name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( + reports.qdevice_certificate_import_error(output) + ) + +def remote_qdevice_get_ca_certificate(node_communicator, host): + """ + connect to a qnetd host and get qnetd CA certificate + string host address of the qnetd host + """ + try: + return base64.b64decode( + node_communicator.call_host( + host, + "remote/qdevice_net_get_ca_certificate", + None + ) + ) + except (TypeError, binascii.Error): + raise LibraryError(reports.invalid_response_format(host)) + +def remote_client_setup(node_communicator, node, qnetd_ca_certificate): + """ + connect to a remote node and initialize qdevice there + NodeAddresses node target node + qnetd_ca_certificate qnetd CA certificate + """ + return node_communicator.call_node( + node, + "remote/qdevice_net_client_init_certificate_storage", + external.NodeCommunicator.format_data_dict([ + ("ca_certificate", base64.b64encode(qnetd_ca_certificate)), + ]) + ) + +def remote_sign_certificate_request( + node_communicator, host, cert_request, cluster_name +): + """ + connect to a qdevice host and sign node certificate there + string host address of the qnetd host + cert_request certificate request to be signed + string cluster_name name of the cluster to which qdevice is being added + """ + try: + return base64.b64decode( + node_communicator.call_host( + host, + "remote/qdevice_net_sign_node_certificate", + external.NodeCommunicator.format_data_dict([ + ("certificate_request", base64.b64encode(cert_request)), + ("cluster_name", cluster_name), + ]) + ) + ) + except (TypeError, binascii.Error): + raise LibraryError(reports.invalid_response_format(host)) + +def remote_client_import_certificate_and_key(node_communicator, node, pk12): + """ + import pk12 certificate on a remote node + NodeAddresses node target node + pk12 certificate + """ + return node_communicator.call_node( + node, + "remote/qdevice_net_client_import_certificate", + external.NodeCommunicator.format_data_dict([ + ("certificate", base64.b64encode(pk12)), + ]) + ) + +def remote_client_destroy(node_communicator, node): + """ + delete qdevice client config files on a remote node + NodeAddresses node target node + """ + return node_communicator.call_node( + node, + "remote/qdevice_net_client_destroy", + None + ) + +def _store_to_tmpfile(data, report_func): + try: + tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs") + tmpfile.write(data) + tmpfile.flush() + return tmpfile + except EnvironmentError as e: + raise LibraryError(report_func(e.strerror)) + +def _get_output_certificate(cert_tool_output, report_func): + regexp = re.compile(r"^Certificate( request)? 
stored in (?P.+)$") + filename = None + for line in cert_tool_output.splitlines(): + match = regexp.search(line) + if match: + filename = match.group("path") + if not filename: + raise LibraryError(report_func(cert_tool_output)) + try: + with open(filename, "rb") as cert_file: + return cert_file.read() + except EnvironmentError as e: + raise LibraryError(report_func( + "{path}: {error}".format(path=filename, error=e.strerror) + )) diff -Nru pcs-0.9.151/pcs/lib/env.py pcs-0.9.153/pcs/lib/env.py --- pcs-0.9.151/pcs/lib/env.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/env.py 2016-07-01 13:31:39.000000000 +0000 @@ -10,6 +10,7 @@ from pcs.lib import reports from pcs.lib.external import ( is_cman_cluster, + is_service_running, CommandRunner, NodeCommunicator, ) @@ -21,12 +22,14 @@ from pcs.lib.nodes_task import ( distribute_corosync_conf, check_corosync_offline_on_nodes, + qdevice_reload_on_nodes, ) from pcs.lib.pacemaker import ( get_cib, get_cib_xml, replace_cib_configuration_xml, ) +from pcs.lib.cib.tools import ensure_cib_version class LibraryEnvironment(object): @@ -54,6 +57,7 @@ # related code currently - it's in pcsd self._auth_tokens_getter = auth_tokens_getter self._auth_tokens = None + self._cib_upgraded = False @property def logger(self): @@ -77,27 +81,45 @@ self._is_cman_cluster = is_cman_cluster(self.cmd_runner()) return self._is_cman_cluster - def get_cib_xml(self): + @property + def cib_upgraded(self): + return self._cib_upgraded + + def _get_cib_xml(self): if self.is_cib_live: return get_cib_xml(self.cmd_runner()) else: return self._cib_data - def get_cib(self): - return get_cib(self.get_cib_xml()) + def get_cib(self, minimal_version=None): + cib = get_cib(self._get_cib_xml()) + if minimal_version is not None: + upgraded_cib = ensure_cib_version( + self.cmd_runner(), cib, minimal_version + ) + if upgraded_cib is not None: + cib = upgraded_cib + self._cib_upgraded = True + return cib - def push_cib_xml(self, cib_data): + def _push_cib_xml(self, cib_data): if self.is_cib_live: - replace_cib_configuration_xml(self.cmd_runner(), cib_data) + replace_cib_configuration_xml( + self.cmd_runner(), cib_data, self._cib_upgraded + ) + if self._cib_upgraded: + self._cib_upgraded = False + self.report_processor.process(reports.cib_upgrade_successful()) else: self._cib_data = cib_data + def push_cib(self, cib): #etree returns bytes: b'xml' #python 3 removed .encode() from bytes #run(...) calls subprocess.Popen.communicate which calls encode... 
#so here is bytes to str conversion - self.push_cib_xml(etree.tostring(cib).decode()) + self._push_cib_xml(etree.tostring(cib).decode()) @property def is_cib_live(self): @@ -132,11 +154,18 @@ corosync_conf_data, skip_offline_nodes ) - if not corosync_conf_facade.need_stopped_cluster: + if is_service_running(self.cmd_runner(), "corosync"): reload_corosync_config(self.cmd_runner()) self.report_processor.process( reports.corosync_config_reloaded() ) + if corosync_conf_facade.need_qdevice_reload: + qdevice_reload_on_nodes( + self.node_communicator(), + self.report_processor, + node_list, + skip_offline_nodes + ) else: self._corosync_conf_data = corosync_conf_data diff -Nru pcs-0.9.151/pcs/lib/errors.py pcs-0.9.153/pcs/lib/errors.py --- pcs-0.9.151/pcs/lib/errors.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/errors.py 2016-07-01 13:31:39.000000000 +0000 @@ -42,4 +42,8 @@ self.message = self.message_pattern.format(**self.info) def __repr__(self): - return self.code+": "+str(self.info) + return "{severity} {code}: {info}".format( + severity=self.severity, + code=self.code, + info=self.info + ) diff -Nru pcs-0.9.151/pcs/lib/external.py pcs-0.9.153/pcs/lib/external.py --- pcs-0.9.151/pcs/lib/external.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/external.py 2016-07-01 13:31:39.000000000 +0000 @@ -49,13 +49,240 @@ from pcs.lib import reports from pcs.lib.errors import LibraryError, ReportItemSeverity +from pcs.common import report_codes +from pcs.common.tools import ( + simple_cache, + run_parallel as tools_run_parallel, +) from pcs import settings +class ManageServiceError(Exception): + #pylint: disable=super-init-not-called + def __init__(self, service, message=None): + self.service = service + self.message = message + +class DisableServiceError(ManageServiceError): + pass + +class EnableServiceError(ManageServiceError): + pass + +class StartServiceError(ManageServiceError): + pass + +class StopServiceError(ManageServiceError): + pass + +class KillServicesError(ManageServiceError): + pass + + def is_path_runnable(path): return os.path.isfile(path) and os.access(path, os.X_OK) +def is_dir_nonempty(path): + if not os.path.exists(path): + return False + if not os.path.isdir(path): + return True + return len(os.listdir(path)) > 0 + + +@simple_cache +def is_systemctl(): + """ + Check whenever is local system running on systemd. + Returns True if current system is systemctl compatible, False otherwise. + """ + systemctl_paths = [ + '/usr/bin/systemctl', + '/bin/systemctl', + '/var/run/systemd/system', + ] + for path in systemctl_paths: + if os.path.exists(path): + return True + return False + + +def disable_service(runner, service): + """ + Disable specified service in local system. + Raise DisableServiceError or LibraryError on failure. + + runner -- CommandRunner + service -- name of service + """ + if is_systemctl(): + output, retval = runner.run([ + "systemctl", "disable", service + ".service" + ]) + else: + if not is_service_installed(runner, service): + return + output, retval = runner.run(["chkconfig", service, "off"]) + if retval != 0: + raise DisableServiceError(service, output.rstrip()) + + +def enable_service(runner, service): + """ + Enable specified service in local system. + Raise EnableServiceError or LibraryError on failure. 
+ + runner -- CommandRunner + service -- name of service + """ + if is_systemctl(): + output, retval = runner.run([ + "systemctl", "enable", service + ".service" + ]) + else: + output, retval = runner.run(["chkconfig", service, "on"]) + if retval != 0: + raise EnableServiceError(service, output.rstrip()) + + +def start_service(runner, service): + """ + Start specified service in local system + CommandRunner runner + string service service name + """ + if is_systemctl(): + output, retval = runner.run([ + "systemctl", "start", "{0}.service".format(service) + ]) + else: + output, retval = runner.run(["service", service, "start"]) + if retval != 0: + raise StartServiceError(service, output.rstrip()) + + +def stop_service(runner, service): + """ + Stop specified service in local system + CommandRunner runner + string service service name + """ + if is_systemctl(): + output, retval = runner.run([ + "systemctl", "stop", "{0}.service".format(service) + ]) + else: + output, retval = runner.run(["service", service, "stop"]) + if retval != 0: + raise StopServiceError(service, output.rstrip()) + + +def kill_services(runner, services): + """ + Kill specified services in local system + CommandRunner runner + iterable services service names + """ + # make killall not report that a process is not running + output, retval = runner.run( + ["killall", "--quiet", "--signal", "9", "--"] + list(services) + ) + # If a process isn't running, killall will still return 1 even with --quiet. + # We don't consider that an error, so we check for output string as well. + # If it's empty, no actuall error happened. + if retval != 0: + if output.strip(): + raise KillServicesError(list(services), output.rstrip()) + + +def is_service_enabled(runner, service): + """ + Check if specified service is enabled in local system. + + runner -- CommandRunner + service -- name of service + """ + if is_systemctl(): + _, retval = runner.run( + ["systemctl", "is-enabled", service + ".service"] + ) + else: + _, retval = runner.run(["chkconfig", service]) + + return retval == 0 + + +def is_service_running(runner, service): + """ + Check if specified service is currently running on local system. + + runner -- CommandRunner + service -- name of service + """ + if is_systemctl(): + _, retval = runner.run(["systemctl", "is-active", service + ".service"]) + else: + _, retval = runner.run(["service", service, "status"]) + + return retval == 0 + + +def is_service_installed(runner, service): + """ + Check if specified service is installed on local system. + + runner -- CommandRunner + service -- name of service + """ + if is_systemctl(): + return service in get_systemd_services(runner) + else: + return service in get_non_systemd_services(runner) + + +def get_non_systemd_services(runner): + """ + Returns list of all installed services on non systemd system. + + runner -- CommandRunner + """ + if is_systemctl(): + return [] + + output, return_code = runner.run(["chkconfig"], ignore_stderr=True) + if return_code != 0: + return [] + + service_list = [] + for service in output.splitlines(): + service = service.split(" ", 1)[0] + if service: + service_list.append(service) + return service_list + + +def get_systemd_services(runner): + """ + Returns list of all systemd services installed on local system. 
+ + runner -- CommandRunner + """ + if not is_systemctl(): + return [] + + output, return_code = runner.run(["systemctl", "list-unit-files", "--full"]) + if return_code != 0: + return [] + + service_list = [] + for service in output.splitlines(): + match = re.search(r'^([\S]*)\.service', service) + if match: + service_list.append(match.group(1)) + return service_list + + def is_cman_cluster(runner): """ Detect if underlaying locally installed cluster is CMAN based @@ -155,6 +382,8 @@ class NodePermissionDeniedException(NodeCommunicationException): pass +class NodeCommandUnsuccessfulException(NodeCommunicationException): + pass class NodeUnsupportedCommandException(NodeCommunicationException): pass @@ -166,6 +395,12 @@ """ Transform NodeCommunicationException to ReportItem """ + if isinstance(e, NodeCommandUnsuccessfulException): + return reports.node_communication_command_unsuccessful( + e.node, + e.command, + e.reason + ) exception_to_report = { NodeAuthenticationException: reports.node_communication_error_not_authorized, @@ -285,7 +520,14 @@ self._reporter.process( reports.node_communication_finished(url, e.code, response_data) ) - if e.code == 401: + if e.code == 400: + # old pcsd protocol: error messages are commonly passed in plain + # text in response body with HTTP code 400 + # we need to be backward compatible with that + raise NodeCommandUnsuccessfulException( + host, request, response_data.rstrip() + ) + elif e.code == 401: raise NodeAuthenticationException( host, request, "HTTP error: {0}".format(e.code) ) @@ -343,3 +585,39 @@ base64.b64encode(" ".join(self._groups).encode("utf-8")) )) return cookies + + +def parallel_nodes_communication_helper( + func, func_args_kwargs, reporter, skip_offline_nodes=False +): + """ + Help running node calls in parallel and handle communication exceptions. + Raise LibraryError on any failure. 
+ + function func function to be run, should be a function calling a node + iterable func_args_kwargs list of tuples: (*args, **kwargs) + bool skip_offline_nodes do not raise LibraryError if a node is unreachable + """ + failure_severity = ReportItemSeverity.ERROR + failure_forceable = report_codes.SKIP_OFFLINE_NODES + if skip_offline_nodes: + failure_severity = ReportItemSeverity.WARNING + failure_forceable = None + report_items = [] + + def _parallel(*args, **kwargs): + try: + func(*args, **kwargs) + except NodeCommunicationException as e: + report_items.append( + node_communicator_exception_to_report_item( + e, + failure_severity, + failure_forceable + ) + ) + except LibraryError as e: + report_items.extend(e.args) + + tools_run_parallel(_parallel, func_args_kwargs) + reporter.process_list(report_items) diff -Nru pcs-0.9.151/pcs/lib/node.py pcs-0.9.153/pcs/lib/node.py --- pcs-0.9.151/pcs/lib/node.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/node.py 2016-07-01 13:31:39.000000000 +0000 @@ -6,12 +6,44 @@ ) +class NodeNotFound(Exception): + pass + + class NodeAddresses(object): def __init__(self, ring0, ring1=None, name=None, id=None): - self.ring0 = ring0 - self.ring1 = ring1 - self.name = name - self.id = id + self._ring0 = ring0 + self._ring1 = ring1 + self._name = name + self._id = id + + def __hash__(self): + return hash(self.label) + + def __eq__(self, other): + return self.label == other.label + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return self.label < other.label + + @property + def ring0(self): + return self._ring0 + + @property + def ring1(self): + return self._ring1 + + @property + def name(self): + return self._name + + @property + def id(self): + return self._id @property def label(self): @@ -38,3 +70,9 @@ def __reversed__(self): return self._list.__reversed__() + + def find_by_label(self, label): + for node in self._list: + if node.label == label: + return node + raise NodeNotFound() diff -Nru pcs-0.9.151/pcs/lib/nodes_task.py pcs-0.9.153/pcs/lib/nodes_task.py --- pcs-0.9.151/pcs/lib/nodes_task.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/nodes_task.py 2016-07-01 13:31:39.000000000 +0000 @@ -8,13 +8,19 @@ import json from pcs.common import report_codes +from pcs.common.tools import run_parallel as tools_run_parallel from pcs.lib import reports -from pcs.lib.errors import ReportItemSeverity +from pcs.lib.errors import LibraryError, ReportItemSeverity from pcs.lib.external import ( + NodeCommunicator, NodeCommunicationException, node_communicator_exception_to_report_item, + parallel_nodes_communication_helper, +) +from pcs.lib.corosync import ( + live as corosync_live, + qdevice_client, ) -from pcs.lib.corosync import live as corosync_live def distribute_corosync_conf( @@ -32,11 +38,9 @@ if skip_offline_nodes: failure_severity = ReportItemSeverity.WARNING failure_forceable = None - - reporter.process(reports.corosync_config_distribution_started()) report_items = [] - # TODO use parallel communication - for node in node_addr_list: + + def _parallel(node): try: corosync_live.set_remote_corosync_conf( node_communicator, @@ -61,6 +65,12 @@ failure_forceable ) ) + + reporter.process(reports.corosync_config_distribution_started()) + tools_run_parallel( + _parallel, + [((node, ), {}) for node in node_addr_list] + ) reporter.process_list(report_items) def check_corosync_offline_on_nodes( @@ -76,13 +86,11 @@ if skip_offline_nodes: failure_severity = ReportItemSeverity.WARNING failure_forceable = None - - 
reporter.process(reports.corosync_not_running_check_started()) report_items = [] - # TODO use parallel communication - for node in node_addr_list: + + def _parallel(node): try: - status = node_communicator.call_node(node, "remote/status", "") + status = node_communicator.call_node(node, "remote/status", None) if not json.loads(status)["corosync"]: reporter.process( reports.corosync_not_running_on_node_ok(node.label) @@ -114,4 +122,58 @@ failure_forceable ) ) + + reporter.process(reports.corosync_not_running_check_started()) + tools_run_parallel( + _parallel, + [((node, ), {}) for node in node_addr_list] + ) reporter.process_list(report_items) + +def qdevice_reload_on_nodes( + node_communicator, reporter, node_addr_list, skip_offline_nodes=False +): + """ + Reload corosync-qdevice configuration on cluster nodes + NodeAddressesList node_addr_list nodes to reload config on + bool skip_offline_nodes don't raise an error on node communication errors + """ + reporter.process(reports.qdevice_client_reload_started()) + parallel_params = [ + [(reporter, node_communicator, node), {}] + for node in node_addr_list + ] + # catch an exception so we try to start qdevice on nodes where we stopped it + report_items = [] + try: + parallel_nodes_communication_helper( + qdevice_client.remote_client_stop, + parallel_params, + reporter, + skip_offline_nodes + ) + except LibraryError as e: + report_items.extend(e.args) + try: + parallel_nodes_communication_helper( + qdevice_client.remote_client_start, + parallel_params, + reporter, + skip_offline_nodes + ) + except LibraryError as e: + report_items.extend(e.args) + reporter.process_list(report_items) + +def node_check_auth(communicator, node): + """ + Check authentication and online status of 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + communicator.call_node( + node, + "remote/check_auth", + NodeCommunicator.format_data_dict({"check_auth_only": 1}) + ) diff -Nru pcs-0.9.151/pcs/lib/pacemaker.py pcs-0.9.153/pcs/lib/pacemaker.py --- pcs-0.9.151/pcs/lib/pacemaker.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/pacemaker.py 2016-07-01 13:31:39.000000000 +0000 @@ -55,24 +55,21 @@ except (etree.XMLSyntaxError, etree.DocumentInvalid): raise LibraryError(reports.cib_load_error_invalid_format()) -def replace_cib_configuration_xml(runner, xml): - output, retval = runner.run( - [ - __exec("cibadmin"), - "--replace", "--scope", "configuration", "--verbose", "--xml-pipe" - ], - stdin_string=xml - ) +def replace_cib_configuration_xml(runner, xml, cib_upgraded=False): + cmd = [__exec("cibadmin"), "--replace", "--verbose", "--xml-pipe"] + if not cib_upgraded: + cmd += ["--scope", "configuration"] + output, retval = runner.run(cmd, stdin_string=xml) if retval != 0: raise LibraryError(reports.cib_push_error(retval, output)) -def replace_cib_configuration(runner, tree): +def replace_cib_configuration(runner, tree, cib_upgraded=False): #etree returns bytes: b'xml' #python 3 removed .encode() from bytes #run(...) calls subprocess.Popen.communicate which calls encode... 
#so here is bytes to str conversion xml = etree.tostring(tree).decode() - return replace_cib_configuration_xml(runner, xml) + return replace_cib_configuration_xml(runner, xml, cib_upgraded) def get_local_node_status(runner): try: diff -Nru pcs-0.9.151/pcs/lib/reports.py pcs-0.9.153/pcs/lib/reports.py --- pcs-0.9.151/pcs/lib/reports.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/reports.py 2016-07-01 13:31:39.000000000 +0000 @@ -411,6 +411,22 @@ forceable=forceable ) +def node_communication_command_unsuccessful(node, command, reason): + """ + node rejected a request for another reason with a plain text explanation + node string node address / name + reason string decription of the error + """ + return ReportItem.error( + report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, + "{node}: {reason}", + info={ + "node": node, + "command": command, + "reason": reason, + } + ) + def node_communication_error_other_error( node, command, reason, severity=ReportItemSeverity.ERROR, forceable=None @@ -536,6 +552,32 @@ info={"node": node} ) +def corosync_quorum_get_status_error(reason): + """ + unable to get runtime status of quorum on local node + string reason an error message + """ + return ReportItem.error( + report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, + "Unable to get quorum status: {reason}", + info={ + "reason": reason, + } + ) + +def corosync_quorum_set_expected_votes_error(reason): + """ + unable to set expcted votes in a live cluster + string reason an error message + """ + return ReportItem.error( + report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR, + "Unable to set expected votes: {reason}", + info={ + "reason": reason, + } + ) + def corosync_config_reloaded(): """ corosync configuration has been reloaded @@ -598,6 +640,21 @@ "Unable to parse corosync config" ) +def corosync_options_incompatible_with_qdevice(options): + """ + cannot set specified corosync options when qdevice is in use + iterable options incompatible options names + """ + return ReportItem.error( + report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE, + "These options cannot be set when the cluster uses a quorum device: " + + "{options_names_str}", + info={ + "options_names": options, + "options_names_str": ", ".join(sorted(options)), + } + ) + def qdevice_already_defined(): """ qdevice is already set up in a cluster, when it was expected not to be @@ -616,6 +673,187 @@ "no quorum device is defined in this cluster" ) +def qdevice_remove_or_cluster_stop_needed(): + """ + operation cannot be executed, qdevice removal or cluster stop is needed + """ + return ReportItem.error( + report_codes.QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED, + "You need to stop the cluster or remove qdevice from cluster to continue" + ) + +def qdevice_client_reload_started(): + """ + qdevice client configuration is about to be reloaded on nodes + """ + return ReportItem.info( + report_codes.QDEVICE_CLIENT_RELOAD_STARTED, + "Reloading qdevice configuration on nodes..." 
+ ) + +def qdevice_already_initialized(model): + """ + cannot create qdevice on local host, it has been already created + string model qdevice model + """ + return ReportItem.error( + report_codes.QDEVICE_ALREADY_INITIALIZED, + "Quorum device '{model}' has been already initialized", + info={ + "model": model, + } + ) + +def qdevice_not_initialized(model): + """ + cannot work with qdevice on local host, it has not been created yet + string model qdevice model + """ + return ReportItem.error( + report_codes.QDEVICE_NOT_INITIALIZED, + "Quorum device '{model}' has not been initialized yet", + info={ + "model": model, + } + ) + +def qdevice_initialization_success(model): + """ + qdevice was successfully initialized on local host + string model qdevice model + """ + return ReportItem.info( + report_codes.QDEVICE_INITIALIZATION_SUCCESS, + "Quorum device '{model}' initialized", + info={ + "model": model, + } + ) + +def qdevice_initialization_error(model, reason): + """ + an error occured when creating qdevice on local host + string model qdevice model + string reason an error message + """ + return ReportItem.error( + report_codes.QDEVICE_INITIALIZATION_ERROR, + "Unable to initialize quorum device '{model}': {reason}", + info={ + "model": model, + "reason": reason, + } + ) + +def qdevice_certificate_distribution_started(): + """ + Qdevice certificates are about to be set up on nodes + """ + return ReportItem.info( + report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, + "Setting up qdevice certificates on nodes..." + ) + +def qdevice_certificate_accepted_by_node(node): + """ + Qdevice certificates have been saved to a node + string node node on which certificates have been saved + """ + return ReportItem.info( + report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, + "{node}: Succeeded", + info={"node": node} + ) + +def qdevice_certificate_removal_started(): + """ + Qdevice certificates are about to be removed from nodes + """ + return ReportItem.info( + report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, + "Removing qdevice certificates from nodes..." 
+ ) + +def qdevice_certificate_removed_from_node(node): + """ + Qdevice certificates have been removed from a node + string node node on which certificates have been deleted + """ + return ReportItem.info( + report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, + "{node}: Succeeded", + info={"node": node} + ) + +def qdevice_certificate_import_error(reason): + """ + an error occured when importing qdevice certificate to a node + string reason an error message + """ + return ReportItem.error( + report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, + "Unable to import quorum device certificate: {reason}", + info={ + "reason": reason, + } + ) + +def qdevice_certificate_sign_error(reason): + """ + an error occured when signing qdevice certificate + string reason an error message + """ + return ReportItem.error( + report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR, + "Unable to sign quorum device certificate: {reason}", + info={ + "reason": reason, + } + ) + +def qdevice_destroy_success(model): + """ + qdevice configuration successfully removed from local host + string model qdevice model + """ + return ReportItem.info( + report_codes.QDEVICE_DESTROY_SUCCESS, + "Quorum device '{model}' configuration files removed", + info={ + "model": model, + } + ) + +def qdevice_destroy_error(model, reason): + """ + an error occured when removing qdevice configuration from local host + string model qdevice model + string reason an error message + """ + return ReportItem.error( + report_codes.QDEVICE_DESTROY_ERROR, + "Unable to destroy quorum device '{model}': {reason}", + info={ + "model": model, + "reason": reason, + } + ) + +def qdevice_get_status_error(model, reason): + """ + unable to get runtime status of qdevice + string model qdevice model + string reason an error message + """ + return ReportItem.error( + report_codes.QDEVICE_GET_STATUS_ERROR, + "Unable to get status of quorum device '{model}': {reason}", + info={ + "model": model, + "reason": reason, + } + ) + def cman_unsupported_command(): """ requested library command is not available as local cluster is CMAN based @@ -914,3 +1152,594 @@ "Enabling broadcast for all rings as CMAN does not support " + "broadcast in only one ring" ) + +def service_start_started(service): + """ + system service is being started + string service service name or description + """ + return ReportItem.info( + report_codes.SERVICE_START_STARTED, + "Starting {service}...", + info={ + "service": service, + } + ) + +def service_start_error(service, reason, node=None): + """ + system service start failed + string service service name or description + string reason error message + string node node on which service has been requested to start + """ + msg = "Unable to start {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_START_ERROR, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_start_success(service, node=None): + """ + system service was started successfully + string service service name or description + string node node on which service has been requested to start + """ + msg = "{service} started" + return ReportItem.info( + report_codes.SERVICE_START_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, + } + ) + +def service_start_skipped(service, reason, node=None): + """ + starting system service was skipped, no error occured + string service service name or description + string reason why the start has been skipped + string node 
node on which service has been requested to start + """ + msg = "not starting {service} - {reason}" + return ReportItem.info( + report_codes.SERVICE_START_SKIPPED, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_stop_started(service): + """ + system service is being stopped + string service service name or description + """ + return ReportItem.info( + report_codes.SERVICE_STOP_STARTED, + "Stopping {service}...", + info={ + "service": service, + } + ) + +def service_stop_error(service, reason, node=None): + """ + system service stop failed + string service service name or description + string reason error message + string node node on which service has been requested to stop + """ + msg = "Unable to stop {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_STOP_ERROR, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_stop_success(service, node=None): + """ + system service was stopped successfully + string service service name or description + string node node on which service has been requested to stop + """ + msg = "{service} stopped" + return ReportItem.info( + report_codes.SERVICE_STOP_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, + } + ) + +def service_kill_error(services, reason): + """ + system services kill failed + iterable services services name or description + string reason error message + """ + return ReportItem.error( + report_codes.SERVICE_KILL_ERROR, + "Unable to kill {services_str}: {reason}", + info={ + "services": services, + "services_str": ", ".join(services), + "reason": reason, + } + ) + +def service_kill_success(services): + """ + system services were killed successfully + iterable services services name or description + """ + return ReportItem.info( + report_codes.SERVICE_KILL_SUCCESS, + "{services_str} killed", + info={ + "services": services, + "services_str": ", ".join(services), + } + ) + +def service_enable_started(service): + """ + system service is being enabled + string service service name or description + """ + return ReportItem.info( + report_codes.SERVICE_ENABLE_STARTED, + "Enabling {service}...", + info={ + "service": service, + } + ) + +def service_enable_error(service, reason, node=None): + """ + system service enable failed + string service service name or description + string reason error message + string node node on which service was enabled + """ + msg = "Unable to enable {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_ENABLE_ERROR, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_enable_success(service, node=None): + """ + system service was enabled successfully + string service service name or description + string node node on which service has been enabled + """ + msg = "{service} enabled" + return ReportItem.info( + report_codes.SERVICE_ENABLE_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, + } + ) + +def service_enable_skipped(service, reason, node=None): + """ + enabling system service was skipped, no error occured + string service service name or description + string reason why the enabling has been skipped + string node node on which service has been requested to enable + """ + msg = "not enabling {service} - {reason}" + return 
ReportItem.info( + report_codes.SERVICE_ENABLE_SKIPPED, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_disable_started(service): + """ + system service is being disabled + string service service name or description + """ + return ReportItem.info( + report_codes.SERVICE_DISABLE_STARTED, + "Disabling {service}...", + info={ + "service": service, + } + ) + +def service_disable_error(service, reason, node=None): + """ + system service disable failed + string service service name or description + string reason error message + string node node on which service was disabled + """ + msg = "Unable to disable {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_DISABLE_ERROR, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, + "node": node, + } + ) + +def service_disable_success(service, node=None): + """ + system service was disabled successfully + string service service name or description + string node node on which service was disabled + """ + msg = "{service} disabled" + return ReportItem.info( + report_codes.SERVICE_DISABLE_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, + } + ) + +def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None): + """ + Invalid format of metadata + """ + return ReportItem( + report_codes.INVALID_METADATA_FORMAT, + severity, + "Invalid metadata format", + forceable=forceable + ) + +def unable_to_get_agent_metadata( + agent, reason, severity=ReportItemSeverity.ERROR, forceable=None +): + """ + There were some issues trying to get metadata of agent + + agent -- agent which metadata were unable to obtain + reason -- reason of failure + """ + return ReportItem( + report_codes.UNABLE_TO_GET_AGENT_METADATA, + severity, + "Unable to get metadata of '{agent}': {reason}", + info={ + "agent": agent, + "reason": reason + }, + forceable=forceable + ) + + +def agent_not_found(agent, severity=ReportItemSeverity.ERROR, forceable=None): + """ + Specified agent doesn't exist + + agent -- name of agent which doesn't exist + """ + return ReportItem( + report_codes.AGENT_NOT_FOUND, + severity, + "Agent '{agent}' not found", + info={"agent": agent}, + forceable=forceable + ) + + +def agent_not_supported( + agent, severity=ReportItemSeverity.ERROR, forceable=None +): + """ + Specified agent is not supported + + agent -- name of agent which is not supported + """ + return ReportItem( + report_codes.UNSUPPORTED_AGENT, + severity, + "Agent '{agent}' is not supported", + info={"agent": agent}, + forceable=forceable + ) + + +def resource_agent_general_error(agent=None): + """ + General not specific error of resource or fence agent. + + agent -- agent name + """ + msg = "Unspecified problem of resource/fence agent" + return ReportItem.error( + report_codes.AGENT_GENERAL_ERROR, + msg if agent is None else msg + " '{agent}'", + info={"agent": agent} + ) + + +def omitting_node(node): + """ + warning that specified node will be omitted in following actions + + node -- node name + """ + return ReportItem.warning( + report_codes.OMITTING_NODE, + "Omitting node '{node}'", + info={"node": node} + ) + + +def sbd_check_started(): + """ + info that SBD pre-enabling checks started + """ + return ReportItem.info( + report_codes.SBD_CHECK_STARTED, + "Running SBD pre-enabling checks..." 
+ ) + + +def sbd_check_success(node): + """ + info that SBD pre-enabling check finished without issues on specified node + + node -- node name + """ + return ReportItem.info( + report_codes.SBD_CHECK_SUCCESS, + "{node}: SBD pre-enabling checks done", + info={"node": node} + ) + + +def sbd_config_distribution_started(): + """ + distribution of SBD configuration started + """ + return ReportItem.info( + report_codes.SBD_CONFIG_DISTRIBUTION_STARTED, + "Distributing SBD config..." + ) + + +def sbd_config_accepted_by_node(node): + """ + info that SBD configuration has been saved successfully on specified node + + node -- node name + """ + return ReportItem.info( + report_codes.SBD_CONFIG_ACCEPTED_BY_NODE, + "{node}: SBD config saved", + info={"node": node} + ) + + +def unable_to_get_sbd_config(node, reason, severity=ReportItemSeverity.ERROR): + """ + unable to get SBD config from specified node (communication or parsing + error) + + node -- node name + reason -- reason of failure + """ + return ReportItem( + report_codes.UNABLE_TO_GET_SBD_CONFIG, + severity, + "Unable to get SBD configuration from node '{node}': {reason}", + info={ + "node": node, + "reason": reason + } + ) + + +def sbd_enabling_started(): + """ + enabling SBD service started + """ + return ReportItem.info( + report_codes.SBD_ENABLING_STARTED, + "Enabling SBD service..." + ) + + +def sbd_disabling_started(): + """ + disabling SBD service started + """ + return ReportItem.info( + report_codes.SBD_DISABLING_STARTED, + "Disabling SBD service..." + ) + + +def invalid_response_format(node): + """ + error message that response in invalid format has been received from + specified node + + node -- node name + """ + return ReportItem.error( + report_codes.INVALID_RESPONSE_FORMAT, + "{node}: Invalid format of response", + info={"node": node} + ) + + +def sbd_not_installed(node): + """ + sbd is not installed on specified node + + node -- node name + """ + return ReportItem.error( + report_codes.SBD_NOT_INSTALLED, + "SBD is not installed on node '{node}'", + info={"node": node} + ) + + +def watchdog_not_found(node, watchdog): + """ + watchdog doesn't exist on specified node + + node -- node name + watchdog -- watchdog device path + """ + return ReportItem.error( + report_codes.WATCHDOG_NOT_FOUND, + "Watchdog '{watchdog}' does not exist on node '{node}'", + info={ + "node": node, + "watchdog": watchdog + } + ) + + +def unable_to_get_sbd_status(node, reason): + """ + there was (communication or parsing) failure during obtaining status of SBD + from specified node + + node -- node name + reason -- reason of failure + """ + return ReportItem.warning( + report_codes.UNABLE_TO_GET_SBD_STATUS, + "Unable to get status of SBD from node '{node}': {reason}", + info={ + "node": node, + "reason": reason + } + ) + +def cluster_restart_required_to_apply_changes(): + """ + warn user a cluster needs to be manually restarted to use new configuration + """ + return ReportItem.warning( + report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES, + "Cluster restart is required in order to apply these changes." + ) + + +def cib_alert_recipient_already_exists(alert_id, recipient_value): + """ + Error that recipient already exists. 
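As an aside on how the new pcs.lib.reports helpers above are consumed: they only construct ReportItem objects, and callers decide whether to hand them to a report processor or to abort by wrapping them in LibraryError. A minimal sketch of that pattern, modelled on the new check_sbd_on_node() further down in this diff (the node label, processor and flag arguments are stand-ins, not code from the patch):

from pcs.lib import reports
from pcs.lib.errors import LibraryError

def report_sbd_check_sketch(report_processor, node_label, sbd_installed,
                            watchdog_exists, watchdog):
    # Collect ReportItems instead of raising on the first problem.
    report_list = []
    if not sbd_installed:
        report_list.append(reports.sbd_not_installed(node_label))
    if not watchdog_exists:
        report_list.append(reports.watchdog_not_found(node_label, watchdog))
    if report_list:
        # LibraryError transports any number of ReportItems to the caller.
        raise LibraryError(*report_list)
    # Success is reported through the processor, not returned.
    report_processor.process(reports.sbd_check_success(node_label))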
+ + alert_id -- id of alert to which recipient belongs + recipient_value -- value of recipient + """ + return ReportItem.error( + report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, + "Recipient '{recipient}' in alert '{alert}' already exists.", + info={ + "recipient": recipient_value, + "alert": alert_id + } + ) + + +def cib_alert_recipient_not_found(alert_id, recipient_value): + """ + Specified recipient not found. + + alert_id -- id of alert to which recipient should belong + recipient_value -- recipient value + """ + return ReportItem.error( + report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, + "Recipient '{recipient}' not found in alert '{alert}'.", + info={ + "recipient": recipient_value, + "alert": alert_id + } + ) + + +def cib_alert_not_found(alert_id): + """ + Alert with specified id doesn't exist. + + alert_id -- id of alert + """ + return ReportItem.error( + report_codes.CIB_ALERT_NOT_FOUND, + "Alert '{alert}' not found.", + info={"alert": alert_id} + ) + + +def cib_upgrade_successful(): + """ + Upgrade of CIB schema was successful. + """ + return ReportItem.info( + report_codes.CIB_UPGRADE_SUCCESSFUL, + "CIB has been upgraded to the latest schema version." + ) + + +def cib_upgrade_failed(reason): + """ + Upgrade of CIB schema failed. + + reason -- reason of failure + """ + return ReportItem.error( + report_codes.CIB_UPGRADE_FAILED, + "Upgrading of CIB to the latest schema failed: {reason}", + info={"reason": reason} + ) + + +def unable_to_upgrade_cib_to_required_version( + current_version, required_version +): + """ + Unable to upgrade CIB to minimal required schema version. + + current_version -- current version of CIB schema + required_version -- required version of CIB schema + """ + return ReportItem.error( + report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION, + "Unable to upgrade CIB to required schema version {required_version} " + "or higher. Current version is {current_version}. 
Newer version of " + "pacemaker is needed.", + info={ + "required_version": "{0}.{1}.{2}".format(*required_version), + "current_version": "{0}.{1}.{2}".format(*current_version) + } + ) diff -Nru pcs-0.9.151/pcs/lib/resource_agent.py pcs-0.9.153/pcs/lib/resource_agent.py --- pcs-0.9.151/pcs/lib/resource_agent.py 2016-05-23 13:20:12.000000000 +0000 +++ pcs-0.9.153/pcs/lib/resource_agent.py 2016-07-01 13:31:39.000000000 +0000 @@ -9,31 +9,40 @@ from lxml import etree from pcs import settings -from pcs.common import report_codes -from pcs.lib.errors import LibraryError -from pcs.lib.errors import ReportItem +from pcs.lib import reports +from pcs.lib.errors import ReportItemSeverity from pcs.lib.pacemaker_values import is_true from pcs.lib.external import is_path_runnable +from pcs.common import report_codes from pcs.common.tools import simple_cache -class UnsupportedResourceAgent(LibraryError): +class ResourceAgentLibError(Exception): pass -class InvalidAgentName(LibraryError): - pass +class ResourceAgentCommonError(ResourceAgentLibError): + # pylint: disable=super-init-not-called + def __init__(self, agent): + self.agent = agent -class AgentNotFound(LibraryError): +class UnsupportedResourceAgent(ResourceAgentCommonError): pass -class UnableToGetAgentMetadata(LibraryError): +class AgentNotFound(ResourceAgentCommonError): pass -class InvalidMetadataFormat(LibraryError): +class UnableToGetAgentMetadata(ResourceAgentCommonError): + # pylint: disable=super-init-not-called + def __init__(self, agent, message): + self.agent = agent + self.message = message + + +class InvalidMetadataFormat(ResourceAgentLibError): pass @@ -48,14 +57,6 @@ return element.text.strip() -def __get_invalid_metadata_format_exception(): - return InvalidMetadataFormat(ReportItem.error( - report_codes.INVALID_METADATA_FORMAT, - "invalid agent metadata format", - forceable=True - )) - - def _get_parameter(parameter_dom): """ Returns dictionary that describes parameter. @@ -68,11 +69,12 @@ default: default value, required: True if is required parameter, False otherwise } + Raises InvalidMetadataFormat if parameter_dom is not in valid format parameter_dom -- parameter dom element """ if parameter_dom.tag != "parameter" or parameter_dom.get("name") is None: - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() longdesc = __get_text_from_dom_element(parameter_dom.find("longdesc")) shortdesc = __get_text_from_dom_element(parameter_dom.find("shortdesc")) @@ -95,12 +97,13 @@ def _get_agent_parameters(metadata_dom): """ - Returns list of parameters from agents metadata + Returns list of parameters from agents metadata. + Raises InvalidMetadataFormat if metadata_dom is not in valid format. metadata_dom -- agent's metadata dom """ if metadata_dom.tag != "resource-agent": - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() params_el = metadata_dom.find("parameters") if params_el is None: @@ -112,19 +115,21 @@ def _get_pcmk_advanced_stonith_parameters(runner): - """Returns advanced instance attributes for stonith devices""" + """ + Returns advanced instance attributes for stonith devices + Raises UnableToGetAgentMetadata if there is problem with obtaining + metadata of stonithd. + Raises InvalidMetadataFormat if obtained metadata are not in valid format. 
+ + runner -- CommandRunner + """ @simple_cache def __get_stonithd_parameters(): output, retval = runner.run( [settings.stonithd_binary, "metadata"], ignore_stderr=True ) if output.strip() == "": - raise UnableToGetAgentMetadata(ReportItem.error( - report_codes.UNABLE_TO_GET_AGENT_METADATA, - "unable to get metadata of stonithd", - info={"external_exitcode": retval, "external_output": output}, - forceable=True - )) + raise UnableToGetAgentMetadata("stonithd", output) try: params = _get_agent_parameters(etree.fromstring(output)) @@ -136,7 +141,7 @@ param["advanced"] = is_advanced return params except etree.XMLSyntaxError: - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() return __get_stonithd_parameters() @@ -144,18 +149,14 @@ def get_fence_agent_metadata(runner, fence_agent): """ Returns dom of metadata for specified fence agent + Raises AgentNotFound if fence_agent doesn't starts with fence_ or it is + relative path or file is not runnable. + Raises UnableToGetAgentMetadata if there was problem getting or + parsing metadata. + runner -- CommandRunner fence_agent -- fence agent name, should start with 'fence_' """ - - def __get_error(info): - return UnableToGetAgentMetadata(ReportItem.error( - report_codes.UNABLE_TO_GET_AGENT_METADATA, - "unable to get metadata of fence agent '{agent_name}'", - info=info, - forceable=True - )) - script_path = os.path.join(settings.fence_agent_binaries, fence_agent) if not ( @@ -163,36 +164,27 @@ __is_path_abs(script_path) and is_path_runnable(script_path) ): - raise AgentNotFound(ReportItem.error( - report_codes.INVALID_RESOURCE_NAME, - "fence agent '{agent_name}' not found", - info={"agent_name": fence_agent}, - forceable=True - )) + raise AgentNotFound(fence_agent) output, retval = runner.run( [script_path, "-o", "metadata"], ignore_stderr=True ) if output.strip() == "": - raise __get_error({ - "agent_name": fence_agent, - "external_exitcode": retval, - "external_output": output - }) + raise UnableToGetAgentMetadata(fence_agent, output) try: return etree.fromstring(output) except etree.XMLSyntaxError as e: - raise __get_error({ - "agent_name": fence_agent, - "error_info": str(e) - }) + raise UnableToGetAgentMetadata(fence_agent, str(e)) def _get_nagios_resource_agent_metadata(agent): """ - Returns metadata dom for specified nagios resource agent + Returns metadata dom for specified nagios resource agent. + Raises AgentNotFound if agent is relative path. + Raises UnableToGetAgentMetadata if there was problem getting or + parsing metadata. agent -- name of nagios resource agent """ @@ -200,54 +192,32 @@ metadata_path = os.path.join(settings.nagios_metadata_path, agent + ".xml") if not __is_path_abs(metadata_path): - raise AgentNotFound(ReportItem.error( - report_codes.INVALID_RESOURCE_NAME, - "resource agent '{agent_name}' not found", - info={"agent_name": agent_name}, - forceable=True - )) + raise AgentNotFound(agent_name) try: return etree.parse(metadata_path).getroot() except Exception as e: - raise UnableToGetAgentMetadata(ReportItem.error( - report_codes.UNABLE_TO_GET_AGENT_METADATA, - "unable to get metadata of resource agent '{agent_name}': " + - "{error_info}", - info={ - "agent_name": agent_name, - "error_info": str(e) - }, - forceable=True - )) + raise UnableToGetAgentMetadata(agent_name, str(e)) def _get_ocf_resource_agent_metadata(runner, provider, agent): """ Returns metadata dom for specified ocf resource agent + Raises AgentNotFound if specified agent is relative path or file is not + runnable. 
+ Raises UnableToGetAgentMetadata if there was problem getting or + parsing metadata. + runner -- CommandRunner provider -- resource agent provider agent -- resource agent name """ agent_name = "ocf:" + provider + ":" + agent - def __get_error(info): - return UnableToGetAgentMetadata(ReportItem.error( - report_codes.UNABLE_TO_GET_AGENT_METADATA, - "unable to get metadata of resource agent '{agent_name}'", - info=info, - forceable=True - )) - script_path = os.path.join(settings.ocf_resources, provider, agent) if not __is_path_abs(script_path) or not is_path_runnable(script_path): - raise AgentNotFound(ReportItem.error( - report_codes.INVALID_RESOURCE_NAME, - "resource agent '{agent_name}' not found", - info={"agent_name": agent_name}, - forceable=True - )) + raise AgentNotFound(agent_name) output, retval = runner.run( [script_path, "meta-data"], @@ -256,19 +226,12 @@ ) if output.strip() == "": - raise __get_error({ - "agent_name": agent_name, - "external_exitcode": retval, - "external_output": output - }) + raise UnableToGetAgentMetadata(agent_name, output) try: return etree.fromstring(output) except etree.XMLSyntaxError as e: - raise __get_error({ - "agent_name": agent_name, - "error_info": str(e) - }) + raise UnableToGetAgentMetadata(agent_name, str(e)) def get_agent_desc(metadata_dom): @@ -279,11 +242,12 @@ longdesc: long description shortdesc: short description } + Raises InvalidMetadataFormat if metadata_dom is not in valid format. metadata_dom -- metadata dom of agent """ if metadata_dom.tag != "resource-agent": - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() shortdesc_el = metadata_dom.find("shortdesc") if shortdesc_el is None: @@ -304,7 +268,17 @@ parameters -- list of fence agent parameters """ - banned_parameters = ["debug", "action", "verbose", "version", "help"] + # we don't allow user to change these options, they are intended + # to be used interactively (command line), there is no point setting them + banned_parameters = ["debug", "verbose", "version", "help"] + # but still, we have to let user change 'action' because of backward + # compatibility, just marking it as not required + for param in parameters: + if param["name"] == "action": + param["shortdesc"] = param.get("shortdesc", "") + "\nWARNING: " +\ + "specifying 'action' is deprecated and not necessary with " +\ + "current Pacemaker versions" + param["required"] = False return [ param for param in parameters if param["name"] not in banned_parameters ] @@ -314,6 +288,7 @@ """ Returns complete list of parameters for fence agent from it's metadata. + runner -- CommandRunner metadata_dom -- metadata dom of fence agent """ return ( @@ -335,15 +310,13 @@ def get_resource_agent_metadata(runner, agent): """ Returns metadata of specified agent as dom + Raises UnsupportedResourceAgent if specified agent is not ocf or nagios + agent. + runner -- CommandRunner agent -- agent name """ - error = UnsupportedResourceAgent(ReportItem.error( - report_codes.UNSUPPORTED_RESOURCE_AGENT, - "resource agent '{agent}' is not supported", - info={"agent": agent}, - forceable=True - )) + error = UnsupportedResourceAgent(agent) if agent.startswith("ocf:"): agent_info = agent.split(":", 2) if len(agent_info) != 3: @@ -359,11 +332,12 @@ """ Returns XML action element as dictionary, where all elements attributes are key of dict + Raises InvalidMetadataFormat if action_el is not in valid format. 
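The _filter_fence_agent_parameters() hunk above changes two things: purely interactive options are hidden from the UI, and 'action' is kept only for backward compatibility. A simplified, self-contained restatement of that behaviour (the example parameter dictionaries are invented; the real ones come from _get_parameter()):

def filter_fence_agent_parameters_sketch(parameters):
    # Options meant for interactive/command-line use only are dropped.
    banned = {"debug", "verbose", "version", "help"}
    for param in parameters:
        if param["name"] == "action":
            # Kept for backward compatibility, but demoted and flagged.
            param["shortdesc"] = param.get("shortdesc", "") + (
                "\nWARNING: specifying 'action' is deprecated and not "
                "necessary with current Pacemaker versions"
            )
            param["required"] = False
    return [p for p in parameters if p["name"] not in banned]

example = [
    {"name": "action", "shortdesc": "Fencing action", "required": True},
    {"name": "port", "shortdesc": "Port to fence", "required": True},
    {"name": "debug", "shortdesc": "Debug output", "required": False},
]
# filter_fence_agent_parameters_sketch(example) keeps 'action' (now optional,
# with the deprecation warning appended) and 'port', and drops 'debug'.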
action_el -- action lxml.etree element """ if action_el.tag != "action" or action_el.get("name") is None: - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() return dict(action_el.items()) @@ -371,11 +345,12 @@ def get_agent_actions(metadata_dom): """ Returns list of actions from agents metadata + Raises InvalidMetadataFormat if metadata_dom is not in valid format. metadata_dom -- agent's metadata dom """ if metadata_dom.tag != "resource-agent": - raise __get_invalid_metadata_format_exception() + raise InvalidMetadataFormat() actions_el = metadata_dom.find("actions") if actions_el is None: @@ -402,6 +377,7 @@ Validates instance attributes according to specified agent. Returns tuple of lists (, ) + runner -- CommandRunner instance_attrs -- dictionary of instance attributes, where key is attribute name and value is attribute value agent -- full name (: or ::) @@ -425,3 +401,35 @@ get_resource_agent_metadata(runner, agent) ) return _validate_instance_attributes(agent_params, instance_attrs) + + +def resource_agent_lib_error_to_report_item( + e, severity=ReportItemSeverity.ERROR, forceable=False +): + """ + Transform ResourceAgentLibError to ReportItem + """ + force = None + if e.__class__ == AgentNotFound: + if severity == ReportItemSeverity.ERROR and forceable: + force = report_codes.FORCE_UNKNOWN_AGENT + return reports.agent_not_found(e.agent, severity, force) + if e.__class__ == UnsupportedResourceAgent: + if severity == ReportItemSeverity.ERROR and forceable: + force = report_codes.FORCE_UNSUPPORTED_AGENT + return reports.agent_not_supported(e.agent, severity, force) + if e.__class__ == UnableToGetAgentMetadata: + if severity == ReportItemSeverity.ERROR and forceable: + force = report_codes.FORCE_METADATA_ISSUE + return reports.unable_to_get_agent_metadata( + e.agent, e.message, severity, force + ) + if e.__class__ == InvalidMetadataFormat: + if severity == ReportItemSeverity.ERROR and forceable: + force = report_codes.FORCE_METADATA_ISSUE + return reports.invalid_metadata_format(severity, force) + if e.__class__ == ResourceAgentCommonError: + return reports.resource_agent_general_error(e.agent) + if e.__class__ == ResourceAgentLibError: + return reports.resource_agent_general_error() + raise e diff -Nru pcs-0.9.151/pcs/lib/sbd.py pcs-0.9.153/pcs/lib/sbd.py --- pcs-0.9.151/pcs/lib/sbd.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/sbd.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,364 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import json + +from pcs import settings +from pcs.common import tools +from pcs.lib import ( + external, + reports, +) +from pcs.lib.tools import dict_to_environment_file +from pcs.lib.external import ( + NodeCommunicator, + node_communicator_exception_to_report_item, + NodeCommunicationException, +) +from pcs.lib.errors import LibraryError + + +def _run_parallel_and_raise_lib_error_on_failure(func, param_list): + """ + Run function func in parallel for all specified parameters in arg_list. + Raise LibraryError on any failure. 
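With the refactoring above, pcs/lib/resource_agent.py raises plain exceptions and leaves report construction to its callers; resource_agent_lib_error_to_report_item() is the bridge back into the report machinery. A sketch of the intended calling pattern (the wrapper function itself is illustrative, not part of the patch):

from pcs.lib import resource_agent
from pcs.lib.errors import LibraryError, ReportItemSeverity

def agent_description_sketch(runner, agent_name):
    try:
        metadata = resource_agent.get_resource_agent_metadata(runner, agent_name)
        return resource_agent.get_agent_desc(metadata)
    except resource_agent.ResourceAgentLibError as e:
        # The converter picks the matching report (agent_not_found,
        # agent_not_supported, unable_to_get_agent_metadata, ...) from the
        # concrete exception class; forceable=True attaches the corresponding
        # report_codes.FORCE_* code so the CLI can offer a --force override.
        raise LibraryError(
            resource_agent.resource_agent_lib_error_to_report_item(
                e, ReportItemSeverity.ERROR, forceable=True
            )
        )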
+ + func -- function to be run + param_list -- list of tuples: (*args, **kwargs) + """ + report_list = [] + + def _parallel(*args, **kwargs): + try: + func(*args, **kwargs) + except NodeCommunicationException as e: + report_list.append(node_communicator_exception_to_report_item(e)) + except LibraryError as e: + report_list.extend(e.args) + + tools.run_parallel(_parallel, param_list) + + if report_list: + raise LibraryError(*report_list) + + +def check_sbd(communicator, node, watchdog): + """ + Check SBD on specified 'node' and existence of specified watchdog. + + communicator -- NodeCommunicator + node -- NodeAddresses + watchdog -- watchdog path + """ + return communicator.call_node( + node, + "remote/check_sbd", + NodeCommunicator.format_data_dict([("watchdog", watchdog)]) + ) + + +def check_sbd_on_node(report_processor, node_communicator, node, watchdog): + """ + Check if SBD can be enabled on specified 'node'. + Raises LibraryError if check fails. + Raises NodeCommunicationException if there is communication issue. + + report_processor -- + node_communicator -- NodeCommunicator + node -- NodeAddresses + watchdog -- watchdog path + """ + report_list = [] + try: + data = json.loads(check_sbd(node_communicator, node, watchdog)) + if not data["sbd"]["installed"]: + report_list.append(reports.sbd_not_installed(node.label)) + if not data["watchdog"]["exist"]: + report_list.append(reports.watchdog_not_found(node.label, watchdog)) + except (ValueError, KeyError): + raise LibraryError(reports.invalid_response_format(node.label)) + + if report_list: + raise LibraryError(*report_list) + report_processor.process(reports.sbd_check_success(node.label)) + + +def check_sbd_on_all_nodes(report_processor, node_communicator, nodes_watchdog): + """ + Checks SBD (if SBD is installed and watchdog exists) on all NodeAddresses + defined as keys in data. + Raises LibraryError with all ReportItems in case of any failure. + + report_processor -- + node_communicator -- NodeCommunicator + nodes_watchdog -- dictionary with NodeAddresses as keys and watchdog path + as value + """ + report_processor.process(reports.sbd_check_started()) + _run_parallel_and_raise_lib_error_on_failure( + check_sbd_on_node, + [ + ([report_processor, node_communicator, node, watchdog], {}) + for node, watchdog in sorted(nodes_watchdog.items()) + ] + ) + + +def set_sbd_config(communicator, node, config): + """ + Send SBD configuration to 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + config -- string, SBD configuration file + """ + communicator.call_node( + node, + "remote/set_sbd_config", + NodeCommunicator.format_data_dict([("config", config)]) + ) + + +def set_sbd_config_on_node(report_processor, node_communicator, node, config): + """ + Send SBD configuration to 'node'. Also puts correct node name into + SBD_OPTS option (SBD_OPTS="-n "). + + report_processor -- + node_communicator -- NodeCommunicator + node -- NodeAddresses + config -- dictionary in format: : + """ + config = dict(config) + config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label) + set_sbd_config(node_communicator, node, dict_to_environment_file(config)) + report_processor.process( + reports.sbd_config_accepted_by_node(node.label) + ) + + +def set_sbd_config_on_all_nodes( + report_processor, node_communicator, node_list, config +): + """ + Send SBD configuration 'config' to all nodes in 'node_list'. Option + SBD_OPTS="-n " is added automatically. + Raises LibraryError with all ReportItems in case of any failure. 
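check_sbd_on_node() above decodes the JSON body returned by the remote/check_sbd endpoint; the shape it depends on can be read off the key accesses in the hunk. A sample payload that would pass the check (values invented for illustration; the real pcsd endpoint may return additional keys):

import json

# Anything that is not valid JSON, or is missing one of these keys, is turned
# into reports.invalid_response_format(node.label) by check_sbd_on_node().
sample_response = json.dumps({
    "sbd": {"installed": True},
    "watchdog": {"exist": True},
})

data = json.loads(sample_response)
assert data["sbd"]["installed"]
assert data["watchdog"]["exist"]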
+ + report_processor -- + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + config -- dictionary in format: : + """ + report_processor.process(reports.sbd_config_distribution_started()) + _run_parallel_and_raise_lib_error_on_failure( + set_sbd_config_on_node, + [ + ([report_processor, node_communicator, node, config], {}) + for node in node_list + ] + ) + + +def enable_sbd_service(communicator, node): + """ + Enable SBD service on 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + communicator.call_node(node, "remote/sbd_enable", None) + + +def enable_sbd_service_on_node(report_processor, node_communicator, node): + """ + Enable SBD service on 'node'. + Returns list of ReportItem if there was any failure. Empty list otherwise. + + report_processor -- + node_communicator -- NodeCommunicator + node -- NodeAddresses + """ + enable_sbd_service(node_communicator, node) + report_processor.process(reports.service_enable_success("sbd", node.label)) + + +def enable_sbd_service_on_all_nodes( + report_processor, node_communicator, node_list +): + """ + Enable SBD service on all nodes in 'node_list'. + Raises LibraryError with all ReportItems in case of any failure. + + report_processor -- + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + """ + report_processor.process(reports.sbd_enabling_started()) + _run_parallel_and_raise_lib_error_on_failure( + enable_sbd_service_on_node, + [ + ([report_processor, node_communicator, node], {}) + for node in node_list + ] + ) + + +def disable_sbd_service(communicator, node): + """ + Disable SBD service on 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + communicator.call_node(node, "remote/sbd_disable", None) + + +def disable_sbd_service_on_node(report_processor, node_communicator, node): + """ + Disable SBD service on 'node'. + + report_processor -- + node_communicator -- NodeCommunicator + node -- NodeAddresses + """ + disable_sbd_service(node_communicator, node) + report_processor.process(reports.service_disable_success("sbd", node.label)) + + +def disable_sbd_service_on_all_nodes( + report_processor, node_communicator, node_list +): + """ + Disable SBD service on all nodes in 'node_list'. + Raises LibraryError with all ReportItems in case of any failure. + + report_processor -- + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + """ + report_processor.process(reports.sbd_disabling_started()) + _run_parallel_and_raise_lib_error_on_failure( + disable_sbd_service_on_node, + [ + ([report_processor, node_communicator, node], {}) + for node in node_list + ] + ) + + +def set_stonith_watchdog_timeout_to_zero(communicator, node): + """ + Set cluster property 'stonith-watchdog-timeout' to value '0' on 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + communicator.call_node( + node, "remote/set_stonith_watchdog_timeout_to_zero", None + ) + + +def set_stonith_watchdog_timeout_to_zero_on_all_nodes( + node_communicator, node_list +): + """ + Sets cluster property 'stonith-watchdog-timeout' to value '0' an all nodes + in 'node_list', even if cluster is not currently running on them (direct + editing CIB file). + Raises LibraryError with all ReportItems in case of any failure. 
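Taken together, the helpers in this new module cover the whole SBD-enable workflow: check every node, distribute the configuration, enable the service, then warn that a cluster restart is needed. The actual orchestration lives in the library command layer, which is not part of this hunk; the sketch below only shows how these functions are meant to compose (report_processor, node_communicator and node_list are assumed to be supplied by the library environment):

from pcs.lib import reports, sbd

def enable_sbd_sketch(report_processor, node_communicator, node_list, watchdog):
    # 1. Pre-enabling checks on all nodes in parallel; failures are aggregated
    #    into a single LibraryError.
    sbd.check_sbd_on_all_nodes(
        report_processor,
        node_communicator,
        dict((node, watchdog) for node in node_list),
    )
    # 2. Push the shared configuration; SBD_OPTS is personalised per node.
    config = sbd.get_default_sbd_config()
    config["SBD_WATCHDOG_DEV"] = watchdog
    sbd.set_sbd_config_on_all_nodes(
        report_processor, node_communicator, node_list, config
    )
    # 3. Enable the service everywhere and remind the user about the restart.
    sbd.enable_sbd_service_on_all_nodes(
        report_processor, node_communicator, node_list
    )
    report_processor.process(
        reports.cluster_restart_required_to_apply_changes()
    )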
+ + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + """ + report_list = [] + for node in node_list: + try: + set_stonith_watchdog_timeout_to_zero(node_communicator, node) + except NodeCommunicationException as e: + report_list.append(node_communicator_exception_to_report_item(e)) + if report_list: + raise LibraryError(*report_list) + + +def remove_stonith_watchdog_timeout(communicator, node): + """ + Remove cluster property 'stonith-watchdog-timeout' on 'node'. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", None) + + +def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list): + """ + Removes cluster property 'stonith-watchdog-timeout' from all nodes + in 'node_list', even if cluster is not currently running on them (direct + editing CIB file). + Raises LibraryError with all ReportItems in case of any failure. + + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + """ + report_list = [] + for node in node_list: + try: + remove_stonith_watchdog_timeout(node_communicator, node) + except NodeCommunicationException as e: + report_list.append(node_communicator_exception_to_report_item(e)) + if report_list: + raise LibraryError(*report_list) + + +def get_default_sbd_config(): + """ + Returns default SBD configuration as dictionary. + """ + return { + "SBD_DELAY_START": "no", + "SBD_PACEMAKER": "yes", + "SBD_STARTMODE": "clean", + "SBD_WATCHDOG_DEV": settings.sbd_watchdog_default, + "SBD_WATCHDOG_TIMEOUT": "5" + } + + +def get_local_sbd_config(): + """ + Get local SBD configuration. + Returns SBD configuration file as string. + Raises LibraryError on any failure. + """ + try: + with open(settings.sbd_config, "r") as sbd_cfg: + return sbd_cfg.read() + except EnvironmentError as e: + raise LibraryError(reports.unable_to_get_sbd_config( + "local node", str(e) + )) + + +def get_sbd_config(communicator, node): + """ + Get SBD configuration from 'node'. + Returns SBD configuration string. + + communicator -- NodeCommunicator + node -- NodeAddresses + """ + return communicator.call_node(node, "remote/get_sbd_config", None) + + +def is_sbd_enabled(runner): + """ + Check if SBD service is enabled in local system. + Return True if SBD service is enabled, False otherwise. + + runner -- CommandRunner + """ + return external.is_service_enabled(runner, "sbd") diff -Nru pcs-0.9.151/pcs/lib/tools.py pcs-0.9.153/pcs/lib/tools.py --- pcs-0.9.151/pcs/lib/tools.py 1970-01-01 00:00:00.000000000 +0000 +++ pcs-0.9.153/pcs/lib/tools.py 2016-07-01 13:31:39.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + + +def environment_file_to_dict(config): + """ + Parse systemd Environment file. This parser is simplified version of + parser in systemd, because of their poor implementation. + Returns configuration in dictionary in format: + { +
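The pcs/lib/tools.py hunk is cut off here; for context, two of the helpers it introduces are inverses of each other: environment_file_to_dict() parses the KEY=value lines of an SBD sysconfig file, and dict_to_environment_file() (imported by pcs/lib/sbd.py above) renders a dictionary back into that format. A rough approximation of that behaviour, under the assumption of plain KEY=value lines with '#' comments; the real implementation may differ in details:

def environment_file_to_dict_sketch(config):
    # Assumed behaviour: one KEY=value assignment per line, blank lines and
    # '#' comments ignored.  The upstream docstring calls its parser a
    # simplified version of systemd's own.
    result = {}
    for line in config.splitlines():
        line = line.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue
        key, value = line.split("=", 1)
        result[key.strip()] = value.strip()
    return result


def dict_to_environment_file_sketch(config_dict):
    # Inverse operation; sorted keys assumed for deterministic output.
    return "".join(
        "{0}={1}\n".format(key, value)
        for key, value in sorted(config_dict.items())
    )


# Round trip with a configuration shaped like the SBD defaults shown above:
cfg = {"SBD_PACEMAKER": "yes", "SBD_OPTS": '"-n node1"'}
assert environment_file_to_dict_sketch(dict_to_environment_file_sketch(cfg)) == cfg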